diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6940a81dd72..c4068c1c819 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -37,19 +37,45 @@ Ref: https://keepachangelog.com/en/1.0.0/

 ## [Unreleased]

+## v0.45.9 - 2022-10-14
+
+ATTENTION:
+
+This is a security release for the
+[Dragonberry security advisory](https://forum.cosmos.network/t/ibc-security-advisory-dragonberry/7702).
+
+All users should upgrade immediately.
+
+Users *must* add a replace directive in their go.mod for the
+new `ics23` package in the SDK:
+
+```
+replace (
+	github.com/confio/ics23/go => github.com/cosmos/cosmos-sdk/ics23
+)
+```
+
+### Features
+
+* [#13435](https://github.com/cosmos/cosmos-sdk/pull/13435) Extend error context when a simulation fails.
+
 ### Improvements

 * [#13369](https://github.com/cosmos/cosmos-sdk/pull/13369) Improve UX for `keyring.List` by returning all retrieved keys.
 * [#13323](https://github.com/cosmos/cosmos-sdk/pull/13323) Ensure `withdraw_rewards` rewards are emitted from all actions that result in rewards being withdrawn.
 * [#13321](https://github.com/cosmos/cosmos-sdk/pull/13321) Add flag to disable fast node migration and usage.
 * (store) [#13326](https://github.com/cosmos/cosmos-sdk/pull/13326) Implementation of ADR-038 file StreamingService, backport #8664.
+* (store) [#13540](https://github.com/cosmos/cosmos-sdk/pull/13540) Default fastnode migration to false to prevent surprises. Operators must enable it explicitly, unless they already have it enabled.

 ### API Breaking Changes

-- (cli) [#13089](https://github.com/cosmos/cosmos-sdk/pull/13089) Fix rollback command don't actually delete multistore versions, added method `RollbackToVersion` to interface `CommitMultiStore` and added method `CommitMultiStore` to `Application` interface.
+* (cli) [#13089](https://github.com/cosmos/cosmos-sdk/pull/13089) Fix the rollback command not actually deleting multistore versions; added method `RollbackToVersion` to the `CommitMultiStore` interface and method `CommitMultiStore` to the `Application` interface.

 ### Bug Fixes

+* [#...](https://github.com/cosmos/cosmos-sdk/pull/) Implement the Dragonberry security patch.
+  * To apply the patch, please refer to the [RELEASE NOTES](./RELEASE_NOTES.md).
 * (store) [#13459](https://github.com/cosmos/cosmos-sdk/pull/13459) Don't let state listener observe the uncommitted writes.

 ## v0.45.8 - 2022-08-25
@@ -86,9 +112,6 @@ Ref: https://keepachangelog.com/en/1.0.0/
 ### Bug Fixes

 * (x/mint) [#12384](https://github.com/cosmos/cosmos-sdk/pull/12384) Ensure `GoalBonded` must be positive when performing `x/mint` parameter validation.
-* (simapp) [#12437](https://github.com/cosmos/cosmos-sdk/pull/12437) fix the non-determinstic behavior in simulations caused by `GenTx` and check
-empty coins slice before it is used to create `banktype.MsgSend`.
-* (x/capability) [12818](https://github.com/cosmos/cosmos-sdk/pull/12818) Use fixed length hex for pointer at FwdCapabilityKey.

 ## [v0.45.6](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.45.6) - 2022-06-28

diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 1584f447c4e..9c3a549cb9d 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,7 +1,20 @@
-# Cosmos SDK v0.45.8 Release Notes
+# Cosmos SDK v0.45.9 Release Notes

-This release introduces few improvements, such as the speed-up of the crisis invariant checks (thanks to a Juno bounty), and updated Tendermint and IAVL dependencies.
+This is a security release for the
+[Dragonberry security advisory](https://forum.cosmos.network/t/ibc-security-advisory-dragonberry/7702).
+Please upgrade ASAP.

-See the [Cosmos SDK v0.45.8 Changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.45.8/CHANGELOG.md) for the exhaustive list of all changes.
+In addition, this release includes a few minor bugfixes.

-**Full Commit History**: https://github.com/cosmos/cosmos-sdk/compare/v0.45.7...v0.45.8
+Chains must add the following to their go.mod for the application:
+
+```go
+replace github.com/confio/ics23/go => github.com/cosmos/cosmos-sdk/ics23
+```
+
+Bumping the SDK version should be smooth; however, feel free to tag core devs to review your upgrade PR:
+
+- **CET**: @tac0turtle, @okwme, @AdityaSripal, @colin-axner, @julienrbrt
+- **EST**: @ebuchman, @alexanderbez, @aaronc
+- **PST**: @jtremback, @nicolaslara, @czarcas7ic, @p0mvn
+- **CDT**: @ValarDragon, @zmanian

diff --git a/baseapp/abci.go b/baseapp/abci.go
index 9e54d8138e9..b934255da46 100644
--- a/baseapp/abci.go
+++ b/baseapp/abci.go
@@ -2,6 +2,7 @@ package baseapp

 import (
 	"crypto/sha256"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"os"
@@ -122,24 +123,6 @@ func (app *BaseApp) SetOption(req abci.RequestSetOption) (res abci.ResponseSetOp
 	return
 }

-// FilterPeerByAddrPort filters peers by address/port.
-func (app *BaseApp) FilterPeerByAddrPort(info string) abci.ResponseQuery {
-	if app.addrPeerFilter != nil {
-		return app.addrPeerFilter(info)
-	}
-
-	return abci.ResponseQuery{}
-}
-
-// FilterPeerByID filters peers by node ID.
-func (app *BaseApp) FilterPeerByID(info string) abci.ResponseQuery {
-	if app.idPeerFilter != nil {
-		return app.idPeerFilter(info)
-	}
-
-	return abci.ResponseQuery{}
-}
-
 // BeginBlock implements the ABCI application interface.
 func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeginBlock) {
 	defer telemetry.MeasureSince(time.Now(), "abci", "begin_block")
@@ -805,6 +788,22 @@ func handleQueryApp(app *BaseApp, path []string, req abci.RequestQuery) abci.Res
 			Value: []byte(app.version),
 		}

+	case "snapshots":
+		var responseValue []byte
+
+		response := app.ListSnapshots(abci.RequestListSnapshots{})
+
+		responseValue, err := json.Marshal(response)
+		if err != nil {
+			return sdkerrors.QueryResult(sdkerrors.Wrap(err, fmt.Sprintf("failed to marshal list snapshots response %v", response)))
+		}
+
+		return abci.ResponseQuery{
+			Codespace: sdkerrors.RootCodespace,
+			Height:    req.Height,
+			Value:     responseValue,
+		}
+
 	default:
 		return sdkerrors.QueryResultWithDebug(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unknown query: %s", path), app.trace)
 	}
@@ -840,35 +839,6 @@ func handleQueryStore(app *BaseApp, path []string, req abci.RequestQuery) abci.R
 	return resp
 }

-func handleQueryP2P(app *BaseApp, path []string) abci.ResponseQuery {
-	// "/p2p" prefix for p2p queries
-	if len(path) < 4 {
-		return sdkerrors.QueryResultWithDebug(
-			sdkerrors.Wrap(
-				sdkerrors.ErrUnknownRequest, "path should be p2p filter ",
-			), app.trace)
-	}
-
-	var resp abci.ResponseQuery
-
-	cmd, typ, arg := path[1], path[2], path[3]
-	switch cmd {
-	case "filter":
-		switch typ {
-		case "addr":
-			resp = app.FilterPeerByAddrPort(arg)
-
-		case "id":
-			resp = app.FilterPeerByID(arg)
-		}
-
-	default:
-		resp = sdkerrors.QueryResultWithDebug(sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, "expected second parameter to be 'filter'"), app.trace)
-	}
-
-	return resp
-}
-
 func handleQueryCustom(app *BaseApp, path []string, req abci.RequestQuery) abci.ResponseQuery {
 	// path[0] should be "custom" because "/custom" prefix is required for keeper
 	// queries.
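Note on the new `snapshots` query path above: the handler simply JSON-encodes the `ListSnapshots` response into the query result. A minimal sketch of how a caller could consume it (assuming an initialized `*baseapp.BaseApp`; the helper name is hypothetical and not part of this diff):

```go
package main

import (
	"encoding/json"

	"github.com/cosmos/cosmos-sdk/baseapp"
	abci "github.com/tendermint/tendermint/abci/types"
)

// listSnapshotsViaQuery shows how the "/app/snapshots" query path mirrors
// app.ListSnapshots: the response's Value field carries the JSON-encoded
// abci.ResponseListSnapshots.
func listSnapshotsViaQuery(app *baseapp.BaseApp) (abci.ResponseListSnapshots, error) {
	res := app.Query(abci.RequestQuery{Path: "/app/snapshots"})

	var snapshots abci.ResponseListSnapshots
	err := json.Unmarshal(res.Value, &snapshots)
	return snapshots, err
}
```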
diff --git a/baseapp/abci_test.go b/baseapp/abci_test.go index b382a5a3890..e6ce5c9fafa 100644 --- a/baseapp/abci_test.go +++ b/baseapp/abci_test.go @@ -1,6 +1,7 @@ package baseapp import ( + "encoding/json" "testing" "github.com/stretchr/testify/require" @@ -159,3 +160,40 @@ func TestBaseAppCreateQueryContext(t *testing.T) { }) } } + +type paramStore struct { + db *dbm.MemDB +} + +func (ps *paramStore) Set(_ sdk.Context, key []byte, value interface{}) { + bz, err := json.Marshal(value) + if err != nil { + panic(err) + } + + ps.db.Set(key, bz) +} + +func (ps *paramStore) Has(_ sdk.Context, key []byte) bool { + ok, err := ps.db.Has(key) + if err != nil { + panic(err) + } + + return ok +} + +func (ps *paramStore) Get(_ sdk.Context, key []byte, ptr interface{}) { + bz, err := ps.db.Get(key) + if err != nil { + panic(err) + } + + if len(bz) == 0 { + return + } + + if err := json.Unmarshal(bz, ptr); err != nil { + panic(err) + } +} diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index 4c2d5e5df30..b51f7805bdf 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -46,29 +46,18 @@ type ( type BaseApp struct { //nolint: maligned // initialized on creation logger log.Logger - name string // application name from abci.Info - db dbm.DB // common DB backend - cms sdk.CommitMultiStore // Main (uncached) state - storeLoader StoreLoader // function to handle store loading, may be overridden with SetStoreLoader() - router sdk.Router // handle any kind of message - queryRouter sdk.QueryRouter // router for redirecting query calls - grpcQueryRouter *GRPCQueryRouter // router for redirecting gRPC query calls - msgServiceRouter *MsgServiceRouter // router for redirecting Msg service messages + name string // application name from abci.Info interfaceRegistry types.InterfaceRegistry txDecoder sdk.TxDecoder // unmarshal []byte into sdk.Tx - anteHandler sdk.AnteHandler // ante handler for fee and auth - initChainer sdk.InitChainer // initialize state with validators and state blob - beginBlocker sdk.BeginBlocker // logic to run before any txs - endBlocker sdk.EndBlocker // logic to run after all txs, and to determine valset changes - addrPeerFilter sdk.PeerFilter // filter peers by address and port - idPeerFilter sdk.PeerFilter // filter peers by node ID - fauxMerkleMode bool // if true, IAVL MountStores uses MountStoresDB for simulation speed. + anteHandler sdk.AnteHandler // ante handler for fee and auth - // manages snapshots, i.e. dumps of app state at certain intervals - snapshotManager *snapshots.Manager - snapshotInterval uint64 // block interval between state sync snapshots - snapshotKeepRecent uint32 // recent state sync snapshots to keep + appStore + baseappVersions + peerFilters + snapshotData + abciData + moduleRouter // volatile states: // @@ -77,12 +66,6 @@ type BaseApp struct { //nolint: maligned checkState *state // for CheckTx deliverState *state // for DeliverTx - // an inter-block write-through cache provided to the context during deliverState - interBlockCache sdk.MultiStorePersistentCache - - // absent validators from begin block - voteInfos []abci.VoteInfo - // paramStore is used to query for ABCI consensus parameters from an // application parameter store. paramStore ParamStore @@ -115,13 +98,6 @@ type BaseApp struct { //nolint: maligned // ResponseCommit.RetainHeight. minRetainBlocks uint64 - // application's version string - version string - - // application's protocol version that increments on every upgrade - // if BaseApp is passed to the upgrade keeper's NewKeeper method. 
-	appVersion uint64
-
 	// recovery handler for app.runTx method
 	runTxRecoveryMiddleware recoveryMiddleware

@@ -137,6 +113,51 @@ type BaseApp struct { //nolint: maligned
 	abciListeners []ABCIListener
 }

+type appStore struct {
+	db          dbm.DB               // common DB backend
+	cms         sdk.CommitMultiStore // Main (uncached) state
+	storeLoader StoreLoader          // function to handle store loading, may be overridden with SetStoreLoader()
+
+	// an inter-block write-through cache provided to the context during deliverState
+	interBlockCache sdk.MultiStorePersistentCache
+
+	fauxMerkleMode bool // if true, IAVL MountStores uses MountStoresDB for simulation speed.
+}
+
+type moduleRouter struct {
+	router           sdk.Router        // handle any kind of message
+	queryRouter      sdk.QueryRouter   // router for redirecting query calls
+	grpcQueryRouter  *GRPCQueryRouter  // router for redirecting gRPC query calls
+	msgServiceRouter *MsgServiceRouter // router for redirecting Msg service messages
+}
+
+type abciData struct {
+	initChainer  sdk.InitChainer  // initialize state with validators and state blob
+	beginBlocker sdk.BeginBlocker // logic to run before any txs
+	endBlocker   sdk.EndBlocker   // logic to run after all txs, and to determine valset changes
+
+	// absent validators from begin block
+	voteInfos []abci.VoteInfo
+}
+
+type baseappVersions struct {
+	// application's version string
+	version string
+
+	// application's protocol version that increments on every upgrade
+	// if BaseApp is passed to the upgrade keeper's NewKeeper method.
+	appVersion uint64
+}
+
+// snapshotData should eventually be handled by a dedicated db struct,
+// with the persistence fields grouped as a sub-item of it.
+type snapshotData struct {
+	// manages snapshots, i.e. dumps of app state at certain intervals
+	snapshotManager    *snapshots.Manager
+	snapshotInterval   uint64 // block interval between state sync snapshots
+	snapshotKeepRecent uint32 // recent state sync snapshots to keep
+}
+
 // NewBaseApp returns a reference to an initialized BaseApp. It accepts a
 // variadic number of option functions, which act on the BaseApp to set
 // configuration choices.
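The grouping above relies on Go struct embedding: because `appStore`, `moduleRouter`, `abciData`, `baseappVersions`, and `snapshotData` are embedded in `BaseApp` without field names, their fields are promoted, so existing selectors such as `app.db` or `app.cms` keep compiling unchanged. A minimal, self-contained sketch of the pattern using toy stand-in types (not the real SDK types):

```go
package main

import "fmt"

// toy stand-ins for dbm.DB and sdk.CommitMultiStore
type appStore struct {
	db  string
	cms string
}

type BaseApp struct {
	name     string
	appStore // embedded: db and cms are promoted onto BaseApp
}

func main() {
	app := &BaseApp{name: "demo", appStore: appStore{db: "memdb", cms: "rootmulti"}}
	// Promoted field access; no app.appStore.db indirection is needed.
	fmt.Println(app.db, app.cms)
}
```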
@@ -146,17 +167,21 @@ func NewBaseApp( name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp), ) *BaseApp { app := &BaseApp{ - logger: logger, - name: name, - db: db, - cms: store.NewCommitMultiStore(db), - storeLoader: DefaultStoreLoader, - router: NewRouter(), - queryRouter: NewQueryRouter(), - grpcQueryRouter: NewGRPCQueryRouter(), - msgServiceRouter: NewMsgServiceRouter(), - txDecoder: txDecoder, - fauxMerkleMode: false, + logger: logger, + name: name, + appStore: appStore{ + db: db, + cms: store.NewCommitMultiStore(db), + storeLoader: DefaultStoreLoader, + fauxMerkleMode: false, + }, + moduleRouter: moduleRouter{ + router: NewRouter(), + queryRouter: NewQueryRouter(), + grpcQueryRouter: NewGRPCQueryRouter(), + msgServiceRouter: NewMsgServiceRouter(), + }, + txDecoder: txDecoder, } for _, option := range options { diff --git a/baseapp/baseapp_test.go b/baseapp/baseapp_test.go index 068cde4a0be..f9cdfcb7143 100644 --- a/baseapp/baseapp_test.go +++ b/baseapp/baseapp_test.go @@ -1,18 +1,10 @@ package baseapp import ( - "bytes" - "encoding/binary" "encoding/json" - "fmt" - "math/rand" "os" - "strings" - "sync" "testing" - "time" - "github.com/gogo/protobuf/jsonpb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" @@ -21,13 +13,8 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/codec" - "github.com/cosmos/cosmos-sdk/snapshots" - snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" - "github.com/cosmos/cosmos-sdk/store/rootmulti" store "github.com/cosmos/cosmos-sdk/store/types" - "github.com/cosmos/cosmos-sdk/testutil/testdata" sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/cosmos/cosmos-sdk/x/auth/legacy/legacytx" ) @@ -36,43 +23,6 @@ var ( capKey2 = sdk.NewKVStoreKey("key2") ) -type paramStore struct { - db *dbm.MemDB -} - -func (ps *paramStore) Set(_ sdk.Context, key []byte, value interface{}) { - bz, err := json.Marshal(value) - if err != nil { - panic(err) - } - - ps.db.Set(key, bz) -} - -func (ps *paramStore) Has(_ sdk.Context, key []byte) bool { - ok, err := ps.db.Has(key) - if err != nil { - panic(err) - } - - return ok -} - -func (ps *paramStore) Get(_ sdk.Context, key []byte, ptr interface{}) { - bz, err := ps.db.Get(key) - if err != nil { - panic(err) - } - - if len(bz) == 0 { - return - } - - if err := json.Unmarshal(bz, ptr); err != nil { - panic(err) - } -} - func defaultLogger() log.Logger { return log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "sdk/app") } @@ -119,278 +69,6 @@ func setupBaseApp(t *testing.T, options ...func(*BaseApp)) *BaseApp { return app } -// simple one store baseapp with data and snapshots. Each tx is 1 MB in size (uncompressed). 
-func setupBaseAppWithSnapshots(t *testing.T, blocks uint, blockTxs int, options ...func(*BaseApp)) (*BaseApp, func()) { - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - routerOpt := func(bapp *BaseApp) { - bapp.Router().AddRoute(sdk.NewRoute(routeMsgKeyValue, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - kv := msg.(*msgKeyValue) - bapp.cms.GetCommitKVStore(capKey2).Set(kv.Key, kv.Value) - return &sdk.Result{}, nil - })) - } - - snapshotInterval := uint64(2) - snapshotTimeout := 1 * time.Minute - snapshotDir, err := os.MkdirTemp("", "baseapp") - require.NoError(t, err) - snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snapshotDir) - require.NoError(t, err) - teardown := func() { - os.RemoveAll(snapshotDir) - } - - app := setupBaseApp(t, append(options, - SetSnapshotStore(snapshotStore), - SetSnapshotInterval(snapshotInterval), - SetPruning(sdk.PruningOptions{KeepEvery: 1}), - routerOpt)...) - - app.InitChain(abci.RequestInitChain{}) - - r := rand.New(rand.NewSource(3920758213583)) - keyCounter := 0 - for height := int64(1); height <= int64(blocks); height++ { - app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: height}}) - for txNum := 0; txNum < blockTxs; txNum++ { - tx := txTest{Msgs: []sdk.Msg{}} - for msgNum := 0; msgNum < 100; msgNum++ { - key := []byte(fmt.Sprintf("%v", keyCounter)) - value := make([]byte, 10000) - _, err := r.Read(value) - require.NoError(t, err) - tx.Msgs = append(tx.Msgs, msgKeyValue{Key: key, Value: value}) - keyCounter++ - } - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - resp := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, resp.IsOK(), "%v", resp.String()) - } - app.EndBlock(abci.RequestEndBlock{Height: height}) - app.Commit() - - // Wait for snapshot to be taken, since it happens asynchronously. - if uint64(height)%snapshotInterval == 0 { - start := time.Now() - for { - if time.Since(start) > snapshotTimeout { - t.Errorf("timed out waiting for snapshot after %v", snapshotTimeout) - } - snapshot, err := snapshotStore.Get(uint64(height), snapshottypes.CurrentFormat) - require.NoError(t, err) - if snapshot != nil { - break - } - time.Sleep(100 * time.Millisecond) - } - } - } - - return app, teardown -} - -func TestMountStores(t *testing.T) { - app := setupBaseApp(t) - - // check both stores - store1 := app.cms.GetCommitKVStore(capKey1) - require.NotNil(t, store1) - store2 := app.cms.GetCommitKVStore(capKey2) - require.NotNil(t, store2) -} - -// Test that we can make commits and then reload old versions. -// Test that LoadLatestVersion actually does. 
-func TestLoadVersion(t *testing.T) { - logger := defaultLogger() - pruningOpt := SetPruning(store.PruneNothing) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - // make a cap key and mount the store - err := app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - - emptyCommitID := sdk.CommitID{} - - // fresh store has zero/empty last commit - lastHeight := app.LastBlockHeight() - lastID := app.LastCommitID() - require.Equal(t, int64(0), lastHeight) - require.Equal(t, emptyCommitID, lastID) - - // execute a block, collect commit ID - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res := app.Commit() - commitID1 := sdk.CommitID{Version: 1, Hash: res.Data} - - // execute a block, collect commit ID - header = tmproto.Header{Height: 2} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res = app.Commit() - commitID2 := sdk.CommitID{Version: 2, Hash: res.Data} - - // reload with LoadLatestVersion - app = NewBaseApp(name, logger, db, nil, pruningOpt) - app.MountStores() - err = app.LoadLatestVersion() - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(2), commitID2) - - // reload with LoadVersion, see if you can commit the same block and get - // the same result - app = NewBaseApp(name, logger, db, nil, pruningOpt) - err = app.LoadVersion(1) - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(1), commitID1) - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() - testLoadVersionHelper(t, app, int64(2), commitID2) -} - -func useDefaultLoader(app *BaseApp) { - app.SetStoreLoader(DefaultStoreLoader) -} - -func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(store.PruneNothing) - key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, int64(0), rs.LastCommitID().Version) - - // write some data in substore - kv, _ := rs.GetStore(key).(store.KVStore) - require.NotNil(t, kv) - kv.Set(k, v) - commitID := rs.Commit() - require.Equal(t, int64(1), commitID.Version) -} - -func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(store.PruneDefault) - key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, ver, rs.LastCommitID().Version) - - // query data in substore - kv, _ := rs.GetStore(key).(store.KVStore) - require.NotNil(t, kv) - require.Equal(t, v, kv.Get(k)) -} - -// Test that we can make commits and then reload old versions. -// Test that LoadLatestVersion actually does. 
-func TestSetLoader(t *testing.T) { - cases := map[string]struct { - setLoader func(*BaseApp) - origStoreKey string - loadStoreKey string - }{ - "don't set loader": { - origStoreKey: "foo", - loadStoreKey: "foo", - }, - "default loader": { - setLoader: useDefaultLoader, - origStoreKey: "foo", - loadStoreKey: "foo", - }, - } - - k := []byte("key") - v := []byte("value") - - for name, tc := range cases { - tc := tc - t.Run(name, func(t *testing.T) { - // prepare a db with some data - db := dbm.NewMemDB() - initStore(t, db, tc.origStoreKey, k, v) - - // load the app with the existing db - opts := []func(*BaseApp){SetPruning(store.PruneNothing)} - if tc.setLoader != nil { - opts = append(opts, tc.setLoader) - } - app := NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...) - app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey)) - err := app.LoadLatestVersion() - require.Nil(t, err) - - // "execute" one block - app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: 2}}) - res := app.Commit() - require.NotNil(t, res.Data) - - // check db is properly updated - checkStore(t, db, 2, tc.loadStoreKey, k, v) - checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) - }) - } -} - -func TestVersionSetterGetter(t *testing.T) { - logger := defaultLogger() - pruningOpt := SetPruning(store.PruneDefault) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - require.Equal(t, "", app.Version()) - res := app.Query(abci.RequestQuery{Path: "app/version"}) - require.True(t, res.IsOK()) - require.Equal(t, "", string(res.Value)) - - versionString := "1.0.0" - app.SetVersion(versionString) - require.Equal(t, versionString, app.Version()) - res = app.Query(abci.RequestQuery{Path: "app/version"}) - require.True(t, res.IsOK()) - require.Equal(t, versionString, string(res.Value)) -} - -func TestLoadVersionInvalid(t *testing.T) { - logger := log.NewNopLogger() - pruningOpt := SetPruning(store.PruneNothing) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - err := app.LoadLatestVersion() - require.Nil(t, err) - - // require error when loading an invalid version - err = app.LoadVersion(-1) - require.Error(t, err) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res := app.Commit() - commitID1 := sdk.CommitID{Version: 1, Hash: res.Data} - - // create a new app with the stores mounted under the same cap key - app = NewBaseApp(name, logger, db, nil, pruningOpt) - - // require we can load the latest version - err = app.LoadVersion(1) - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(1), commitID1) - - // require error when loading an invalid version - err = app.LoadVersion(2) - require.Error(t, err) -} - func TestLoadVersionPruning(t *testing.T) { logger := log.NewNopLogger() pruningOptions := store.PruningOptions{ @@ -454,1587 +132,65 @@ func testLoadVersionHelper(t *testing.T, app *BaseApp, expectedHeight int64, exp require.Equal(t, expectedID, lastID) } -func TestOptionFunction(t *testing.T) { - logger := defaultLogger() - db := dbm.NewMemDB() - bap := NewBaseApp("starting name", logger, db, nil, testChangeNameHelper("new name")) - require.Equal(t, bap.name, "new name", "BaseApp should have had name changed via option function") -} - -func testChangeNameHelper(name string) func(*BaseApp) { - return func(bap *BaseApp) { - bap.name = name - } -} - -// Test that txs can be unmarshalled and read and that -// correct error codes are returned when not -func 
TestTxDecoder(t *testing.T) { - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - app := newBaseApp(t.Name()) - tx := newTxCounter(1, 0) - txBytes := codec.MustMarshal(tx) - - dTx, err := app.txDecoder(txBytes) - require.NoError(t, err) - - cTx := dTx.(txTest) - require.Equal(t, tx.Counter, cTx.Counter) -} - -// Test that Info returns the latest committed state. -func TestInfo(t *testing.T) { - app := newBaseApp(t.Name()) - - // ----- test an empty response ------- - reqInfo := abci.RequestInfo{} - res := app.Info(reqInfo) - - // should be empty - assert.Equal(t, "", res.Version) - assert.Equal(t, t.Name(), res.GetData()) - assert.Equal(t, int64(0), res.LastBlockHeight) - require.Equal(t, []uint8(nil), res.LastBlockAppHash) - require.Equal(t, app.AppVersion(), res.AppVersion) - // ----- test a proper response ------- - // TODO -} - -func TestBaseAppOptionSeal(t *testing.T) { - app := setupBaseApp(t) - - require.Panics(t, func() { - app.SetName("") - }) - require.Panics(t, func() { - app.SetVersion("") - }) - require.Panics(t, func() { - app.SetDB(nil) - }) - require.Panics(t, func() { - app.SetCMS(nil) - }) - require.Panics(t, func() { - app.SetInitChainer(nil) - }) - require.Panics(t, func() { - app.SetBeginBlocker(nil) - }) - require.Panics(t, func() { - app.SetEndBlocker(nil) - }) - require.Panics(t, func() { - app.SetAnteHandler(nil) - }) - require.Panics(t, func() { - app.SetAddrPeerFilter(nil) - }) - require.Panics(t, func() { - app.SetIDPeerFilter(nil) - }) - require.Panics(t, func() { - app.SetFauxMerkleMode() - }) - require.Panics(t, func() { - app.SetRouter(NewRouter()) - }) -} - func TestSetMinGasPrices(t *testing.T) { minGasPrices := sdk.DecCoins{sdk.NewInt64DecCoin("stake", 5000)} app := newBaseApp(t.Name(), SetMinGasPrices(minGasPrices.String())) require.Equal(t, minGasPrices, app.minGasPrices) } -func TestInitChainer(t *testing.T) { - name := t.Name() - // keep the db and logger ourselves so - // we can reload the same app later - db := dbm.NewMemDB() - logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) - capKey := sdk.NewKVStoreKey("main") - capKey2 := sdk.NewKVStoreKey("key2") - app.MountStores(capKey, capKey2) - - // set a value in the store on init chain - key, value := []byte("hello"), []byte("goodbye") - var initChainer sdk.InitChainer = func(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { - store := ctx.KVStore(capKey) - store.Set(key, value) - return abci.ResponseInitChain{} - } - - query := abci.RequestQuery{ - Path: "/store/main/key", - Data: key, - } - - // initChainer is nil - nothing happens +func TestGetMaximumBlockGas(t *testing.T) { + app := setupBaseApp(t) app.InitChain(abci.RequestInitChain{}) - res := app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // set initChainer and try again - should see the value - app.SetInitChainer(initChainer) - - // stores are mounted and private members are set - sealing baseapp - err := app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - require.Equal(t, int64(0), app.LastBlockHeight()) - - initChainRes := app.InitChain(abci.RequestInitChain{AppStateBytes: []byte("{}"), ChainId: "test-chain-id"}) // must have valid JSON genesis file, even if empty - - // The AppHash returned by a new chain is the sha256 hash of "". 
- // $ echo -n '' | sha256sum - // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - require.Equal( - t, - []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, - initChainRes.AppHash, - ) - - // assert that chainID is set correctly in InitChain - chainID := app.deliverState.ctx.ChainID() - require.Equal(t, "test-chain-id", chainID, "ChainID in deliverState not set correctly in InitChain") - - chainID = app.checkState.ctx.ChainID() - require.Equal(t, "test-chain-id", chainID, "ChainID in checkState not set correctly in InitChain") - - app.Commit() - res = app.Query(query) - require.Equal(t, int64(1), app.LastBlockHeight()) - require.Equal(t, value, res.Value) - - // reload app - app = NewBaseApp(name, logger, db, nil) - app.SetInitChainer(initChainer) - app.MountStores(capKey, capKey2) - err = app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - require.Equal(t, int64(1), app.LastBlockHeight()) - - // ensure we can still query after reloading - res = app.Query(query) - require.Equal(t, value, res.Value) - - // commit and ensure we can still query - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() - - res = app.Query(query) - require.Equal(t, value, res.Value) -} - -func TestInitChain_WithInitialHeight(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) - - app.InitChain( - abci.RequestInitChain{ - InitialHeight: 3, - }, - ) - app.Commit() - - require.Equal(t, int64(3), app.LastBlockHeight()) -} - -func TestBeginBlock_WithInitialHeight(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) - - app.InitChain( - abci.RequestInitChain{ - InitialHeight: 3, - }, - ) - - require.PanicsWithError(t, "invalid height: 4; expected: 3", func() { - app.BeginBlock(abci.RequestBeginBlock{ - Header: tmproto.Header{ - Height: 4, - }, - }) - }) - - app.BeginBlock(abci.RequestBeginBlock{ - Header: tmproto.Header{ - Height: 3, - }, - }) - app.Commit() - - require.Equal(t, int64(3), app.LastBlockHeight()) -} - -// Simple tx with a list of Msgs. -type txTest struct { - Msgs []sdk.Msg - Counter int64 - FailOnAnte bool -} - -func (tx *txTest) setFailOnAnte(fail bool) { - tx.FailOnAnte = fail -} - -func (tx *txTest) setFailOnHandler(fail bool) { - for i, msg := range tx.Msgs { - tx.Msgs[i] = msgCounter{msg.(msgCounter).Counter, fail} - } -} - -// Implements Tx -func (tx txTest) GetMsgs() []sdk.Msg { return tx.Msgs } -func (tx txTest) ValidateBasic() error { return nil } - -const ( - routeMsgCounter = "msgCounter" - routeMsgCounter2 = "msgCounter2" - routeMsgKeyValue = "msgKeyValue" -) - -// ValidateBasic() fails on negative counters. 
-// Otherwise it's up to the handlers -type msgCounter struct { - Counter int64 - FailOnHandler bool -} - -// dummy implementation of proto.Message -func (msg msgCounter) Reset() {} -func (msg msgCounter) String() string { return "TODO" } -func (msg msgCounter) ProtoMessage() {} - -// Implements Msg -func (msg msgCounter) Route() string { return routeMsgCounter } -func (msg msgCounter) Type() string { return "counter1" } -func (msg msgCounter) GetSignBytes() []byte { return nil } -func (msg msgCounter) GetSigners() []sdk.AccAddress { return nil } -func (msg msgCounter) ValidateBasic() error { - if msg.Counter >= 0 { - return nil - } - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") -} - -func newTxCounter(counter int64, msgCounters ...int64) *txTest { - msgs := make([]sdk.Msg, 0, len(msgCounters)) - for _, c := range msgCounters { - msgs = append(msgs, msgCounter{c, false}) - } - - return &txTest{msgs, counter, false} -} - -// a msg we dont know how to route -type msgNoRoute struct { - msgCounter -} - -func (tx msgNoRoute) Route() string { return "noroute" } - -// a msg we dont know how to decode -type msgNoDecode struct { - msgCounter -} - -func (tx msgNoDecode) Route() string { return routeMsgCounter } - -// Another counter msg. Duplicate of msgCounter -type msgCounter2 struct { - Counter int64 -} - -// dummy implementation of proto.Message -func (msg msgCounter2) Reset() {} -func (msg msgCounter2) String() string { return "TODO" } -func (msg msgCounter2) ProtoMessage() {} - -// Implements Msg -func (msg msgCounter2) Route() string { return routeMsgCounter2 } -func (msg msgCounter2) Type() string { return "counter2" } -func (msg msgCounter2) GetSignBytes() []byte { return nil } -func (msg msgCounter2) GetSigners() []sdk.AccAddress { return nil } -func (msg msgCounter2) ValidateBasic() error { - if msg.Counter >= 0 { - return nil - } - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") -} - -// A msg that sets a key/value pair. 
-type msgKeyValue struct { - Key []byte - Value []byte -} - -func (msg msgKeyValue) Reset() {} -func (msg msgKeyValue) String() string { return "TODO" } -func (msg msgKeyValue) ProtoMessage() {} -func (msg msgKeyValue) Route() string { return routeMsgKeyValue } -func (msg msgKeyValue) Type() string { return "keyValue" } -func (msg msgKeyValue) GetSignBytes() []byte { return nil } -func (msg msgKeyValue) GetSigners() []sdk.AccAddress { return nil } -func (msg msgKeyValue) ValidateBasic() error { - if msg.Key == nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") - } - if msg.Value == nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") - } - return nil -} - -// amino decode -func testTxDecoder(cdc *codec.LegacyAmino) sdk.TxDecoder { - return func(txBytes []byte) (sdk.Tx, error) { - var tx txTest - if len(txBytes) == 0 { - return nil, sdkerrors.Wrap(sdkerrors.ErrTxDecode, "tx bytes are empty") - } - - err := cdc.Unmarshal(txBytes, &tx) - if err != nil { - return nil, sdkerrors.ErrTxDecode - } - - return tx, nil - } -} - -func anteHandlerTxTest(t *testing.T, capKey sdk.StoreKey, storeKey []byte) sdk.AnteHandler { - return func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { - store := ctx.KVStore(capKey) - txTest := tx.(txTest) + ctx := app.NewContext(true, tmproto.Header{}) - if txTest.FailOnAnte { - return ctx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") - } + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 0}}) + require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - _, err := incrementingCounter(t, store, storeKey, txTest.Counter) - if err != nil { - return ctx, err - } + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -1}}) + require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - ctx.EventManager().EmitEvents( - counterEvent("ante_handler", txTest.Counter), - ) + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 5000000}}) + require.Equal(t, uint64(5000000), app.getMaximumBlockGas(ctx)) - return ctx, nil - } + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -5000000}}) + require.Panics(t, func() { app.getMaximumBlockGas(ctx) }) } -func counterEvent(evType string, msgCount int64) sdk.Events { - return sdk.Events{ - sdk.NewEvent( - evType, - sdk.NewAttribute("update_counter", fmt.Sprintf("%d", msgCount)), - ), +func TestListSnapshots(t *testing.T) { + type setupConfig struct { + blocks uint64 + blockTxs int + snapshotInterval uint64 + snapshotKeepEvery uint32 } -} - -func handlerMsgCounter(t *testing.T, capKey sdk.StoreKey, deliverKey []byte) sdk.Handler { - return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - ctx = ctx.WithEventManager(sdk.NewEventManager()) - store := ctx.KVStore(capKey) - var msgCount int64 - switch m := msg.(type) { - case *msgCounter: - if m.FailOnHandler { - return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") - } + app, _ := setupBaseAppWithSnapshots(t, 2, 5) - msgCount = m.Counter - case *msgCounter2: - msgCount = m.Counter - } + expected := abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ + {Height: 2, Format: 1, Chunks: 2}, + }} - ctx.EventManager().EmitEvents( - counterEvent(sdk.EventTypeMessage, msgCount), - ) - - res, err := incrementingCounter(t, store, deliverKey, msgCount) - if err != nil { - return nil, err - } + resp := 
app.ListSnapshots(abci.RequestListSnapshots{})
+	queryResponse := app.Query(abci.RequestQuery{
+		Path: "/app/snapshots",
+	})

-		res.Events = ctx.EventManager().Events().ToABCIEvents()
-		return res, nil
-	}
-}
+	queryListSnapshotsResp := abci.ResponseListSnapshots{}
+	err := json.Unmarshal(queryResponse.Value, &queryListSnapshotsResp)
+	require.NoError(t, err)

-func getIntFromStore(store sdk.KVStore, key []byte) int64 {
-	bz := store.Get(key)
-	if len(bz) == 0 {
-		return 0
-	}
-	i, err := binary.ReadVarint(bytes.NewBuffer(bz))
-	if err != nil {
-		panic(err)
+	for i, s := range resp.Snapshots {
+		querySnapshot := queryListSnapshotsResp.Snapshots[i]
+		// We check that the query snapshot and the direct ListSnapshots result
+		// are equal, then that the hash and metadata are not empty. We currently
+		// do not have a good way to generate the expected values for these.
+		assert.Equal(t, *s, *querySnapshot)
+		assert.NotEmpty(t, s.Hash)
+		assert.NotEmpty(t, s.Metadata)
+		// Set hash and metadata to nil, so we can check the other snapshot
+		// fields against expected
+		s.Hash = nil
+		s.Metadata = nil
 	}
-	return i
-}
+	require.Equal(t, expected, resp)
+}

-func setIntOnStore(store sdk.KVStore, key []byte, i int64) {
-	bz := make([]byte, 8)
-	n := binary.PutVarint(bz, i)
-	store.Set(key, bz[:n])
-}
-
-// check counter matches what's in store.
-// increment and store
-func incrementingCounter(t *testing.T, store sdk.KVStore, counterKey []byte, counter int64) (*sdk.Result, error) {
-	storedCounter := getIntFromStore(store, counterKey)
-	require.Equal(t, storedCounter, counter)
-	setIntOnStore(store, counterKey, counter+1)
-	return &sdk.Result{}, nil
-}
-
-//---------------------------------------------------------------------
-// Tx processing - CheckTx, DeliverTx, SimulateTx.
-// These tests use the serialized tx as input, while most others will use the
-// Check(), Deliver(), Simulate() methods directly.
-// Ensure that Check/Deliver/Simulate work as expected with the store.
-
-// Test that successive CheckTx can see each others' effects
-// on the store within a block, and that the CheckTx state
-// gets reset to the latest committed state during Commit
-func TestCheckTx(t *testing.T) {
-	// This ante handler reads the key and checks that the value matches the current counter.
-	// This ensures changes to the kvstore persist across successive CheckTx.
-	counterKey := []byte("counter-key")
-
-	anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, counterKey)) }
-	routerOpt := func(bapp *BaseApp) {
-		// TODO: can remove this once CheckTx doesnt process msgs.
-		bapp.Router().AddRoute(sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
-			return &sdk.Result{}, nil
-		}))
-	}
-
-	app := setupBaseApp(t, anteOpt, routerOpt)
-
-	nTxs := int64(5)
-	app.InitChain(abci.RequestInitChain{})
-
-	// Create same codec used in txDecoder
-	codec := codec.NewLegacyAmino()
-	registerTestCodec(codec)
-
-	for i := int64(0); i < nTxs; i++ {
-		tx := newTxCounter(i, 0) // no messages
-		txBytes, err := codec.Marshal(tx)
-		require.NoError(t, err)
-		r := app.CheckTx(abci.RequestCheckTx{Tx: txBytes})
-		require.Empty(t, r.GetEvents())
-		require.True(t, r.IsOK(), fmt.Sprintf("%v", r))
-	}
-
-	checkStateStore := app.checkState.ctx.KVStore(capKey1)
-	storedCounter := getIntFromStore(checkStateStore, counterKey)
-
-	// Ensure AnteHandler ran
-	require.Equal(t, nTxs, storedCounter)
-
-	// If a block is committed, CheckTx state should be reset.
- header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header, Hash: []byte("hash")}) - - require.NotNil(t, app.checkState.ctx.BlockGasMeter(), "block gas meter should have been set to checkState") - require.NotEmpty(t, app.checkState.ctx.HeaderHash()) - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - - checkStateStore = app.checkState.ctx.KVStore(capKey1) - storedBytes := checkStateStore.Get(counterKey) - require.Nil(t, storedBytes) -} - -// Test that successive DeliverTx can see each others' effects -// on the store, both within and across blocks. -func TestDeliverTx(t *testing.T) { - // test increments in the ante - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - - // test increments in the handler - deliverKey := []byte("deliver-key") - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - nBlocks := 3 - txPerHeight := 5 - - for blockN := 0; blockN < nBlocks; blockN++ { - header := tmproto.Header{Height: int64(blockN) + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - for i := 0; i < txPerHeight; i++ { - counter := int64(blockN*txPerHeight + i) - tx := newTxCounter(counter, counter) - - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - events := res.GetEvents() - require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") - require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") - require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, counter).ToABCIEvents(), map[string]struct{}{})[0], events[2], "msg handler update counter event") - } - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - } -} - -// Number of messages doesn't matter to CheckTx. -func TestMultiMsgCheckTx(t *testing.T) { - // TODO: ensure we get the same results - // with one message or many -} - -// One call to DeliverTx should process all the messages, in order. 
-func TestMultiMsgDeliverTx(t *testing.T) { - // increment the tx counter - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - - // increment the msg counter - deliverKey := []byte("deliver-key") - deliverKey2 := []byte("deliver-key2") - routerOpt := func(bapp *BaseApp) { - r1 := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - r2 := sdk.NewRoute(routeMsgCounter2, handlerMsgCounter(t, capKey1, deliverKey2)) - bapp.Router().AddRoute(r1) - bapp.Router().AddRoute(r2) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - // run a multi-msg tx - // with all msgs the same route - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - tx := newTxCounter(0, 0, 1, 2) - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - - store := app.deliverState.ctx.KVStore(capKey1) - - // tx counter only incremented once - txCounter := getIntFromStore(store, anteKey) - require.Equal(t, int64(1), txCounter) - - // msg counter incremented three times - msgCounter := getIntFromStore(store, deliverKey) - require.Equal(t, int64(3), msgCounter) - - // replace the second message with a msgCounter2 - - tx = newTxCounter(1, 3) - tx.Msgs = append(tx.Msgs, msgCounter2{0}) - tx.Msgs = append(tx.Msgs, msgCounter2{1}) - txBytes, err = codec.Marshal(tx) - require.NoError(t, err) - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - - store = app.deliverState.ctx.KVStore(capKey1) - - // tx counter only incremented once - txCounter = getIntFromStore(store, anteKey) - require.Equal(t, int64(2), txCounter) - - // original counter increments by one - // new counter increments by two - msgCounter = getIntFromStore(store, deliverKey) - require.Equal(t, int64(4), msgCounter) - msgCounter2 := getIntFromStore(store, deliverKey2) - require.Equal(t, int64(2), msgCounter2) -} - -// Interleave calls to Check and Deliver and ensure -// that there is no cross-talk. Check sees results of the previous Check calls -// and Deliver sees that of the previous Deliver calls, but they don't see eachother. -func TestConcurrentCheckDeliver(t *testing.T) { - // TODO -} - -// Simulate a transaction that uses gas to compute the gas. -// Simulate() and Query("/app/simulate", txBytes) should give -// the same results. 
-func TestSimulateTx(t *testing.T) { - gasConsumed := uint64(5) - - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasConsumed)) - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - ctx.GasMeter().ConsumeGas(gasConsumed, "test") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - cdc := codec.NewLegacyAmino() - registerTestCodec(cdc) - - nBlocks := 3 - for blockN := 0; blockN < nBlocks; blockN++ { - count := int64(blockN + 1) - header := tmproto.Header{Height: count} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - tx := newTxCounter(count, count) - txBytes, err := cdc.Marshal(tx) - require.Nil(t, err) - - // simulate a message, check gas reported - gInfo, result, err := app.Simulate(txBytes) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, gasConsumed, gInfo.GasUsed) - - // simulate again, same result - gInfo, result, err = app.Simulate(txBytes) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, gasConsumed, gInfo.GasUsed) - - // simulate by calling Query with encoded tx - query := abci.RequestQuery{ - Path: "/app/simulate", - Data: txBytes, - } - queryResult := app.Query(query) - require.True(t, queryResult.IsOK(), queryResult.Log) - - var simRes sdk.SimulationResponse - require.NoError(t, jsonpb.Unmarshal(strings.NewReader(string(queryResult.Value)), &simRes)) - - require.Equal(t, gInfo, simRes.GasInfo) - require.Equal(t, result.Log, simRes.Result.Log) - require.Equal(t, result.Events, simRes.Result.Events) - require.True(t, bytes.Equal(result.Data, simRes.Result.Data)) - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - } -} - -func TestRunInvalidTransaction(t *testing.T) { - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - return - }) - } - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // transaction with no messages - { - emptyTx := &txTest{} - _, result, err := app.Deliver(aminoTxEncoder(), emptyTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrInvalidRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrInvalidRequest.ABCICode(), code, err) - } - - // transaction where ValidateBasic fails - { - testCases := []struct { - tx *txTest - fail bool - }{ - {newTxCounter(0, 0), false}, - {newTxCounter(-1, 0), false}, - {newTxCounter(100, 100), false}, - {newTxCounter(100, 5, 4, 3, 2, 1), false}, - - {newTxCounter(0, -1), true}, - {newTxCounter(0, 1, -2), true}, - {newTxCounter(0, 1, 2, -10, 5), true}, - } - - for _, testCase := range testCases { - tx := testCase.tx - _, result, err := app.Deliver(aminoTxEncoder(), tx) - - if testCase.fail { - require.Error(t, err) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, 
sdkerrors.ErrInvalidSequence.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrInvalidSequence.ABCICode(), code, err) - } else { - require.NotNil(t, result) - } - } - } - - // transaction with no known route - { - unknownRouteTx := txTest{[]sdk.Msg{msgNoRoute{}}, 0, false} - _, result, err := app.Deliver(aminoTxEncoder(), unknownRouteTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) - - unknownRouteTx = txTest{[]sdk.Msg{msgCounter{}, msgNoRoute{}}, 0, false} - _, result, err = app.Deliver(aminoTxEncoder(), unknownRouteTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ = sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) - } - - // Transaction with an unregistered message - { - tx := newTxCounter(0, 0) - tx.Msgs = append(tx.Msgs, msgNoDecode{}) - - // new codec so we can encode the tx, but we shouldn't be able to decode - newCdc := codec.NewLegacyAmino() - registerTestCodec(newCdc) - newCdc.RegisterConcrete(&msgNoDecode{}, "cosmos-sdk/baseapp/msgNoDecode", nil) - - txBytes, err := newCdc.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), res.Code) - require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), res.Codespace) - } -} - -// Test that transactions exceeding gas limits fail -func TestTxGasLimits(t *testing.T) { - gasGranted := uint64(10) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) - - // AnteHandlers must have their own defer/recover in order for the BaseApp - // to know how much gas was used! This is because the GasMeter is created in - // the AnteHandler, but if it panics the context won't be set properly in - // runTx's recover call. 
- defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) - default: - panic(r) - } - } - }() - - count := tx.(txTest).Counter - newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") - - return newCtx, nil - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - testCases := []struct { - tx *txTest - gasUsed uint64 - fail bool - }{ - {newTxCounter(0, 0), 0, false}, - {newTxCounter(1, 1), 2, false}, - {newTxCounter(9, 1), 10, false}, - {newTxCounter(1, 9), 10, false}, - {newTxCounter(10, 0), 10, false}, - {newTxCounter(0, 10), 10, false}, - {newTxCounter(0, 8, 2), 10, false}, - {newTxCounter(0, 5, 1, 1, 1, 1, 1), 10, false}, - {newTxCounter(0, 5, 1, 1, 1, 1), 9, false}, - - {newTxCounter(9, 2), 11, true}, - {newTxCounter(2, 9), 11, true}, - {newTxCounter(9, 1, 1), 11, true}, - {newTxCounter(1, 8, 1, 1), 11, true}, - {newTxCounter(11, 0), 11, true}, - {newTxCounter(0, 11), 11, true}, - {newTxCounter(0, 5, 11), 16, true}, - } - - for i, tc := range testCases { - tx := tc.tx - gInfo, result, err := app.Deliver(aminoTxEncoder(), tx) - - // check gas used and wanted - require.Equal(t, tc.gasUsed, gInfo.GasUsed, fmt.Sprintf("tc #%d; gas: %v, result: %v, err: %s", i, gInfo, result, err)) - - // check for out of gas - if !tc.fail { - require.NotNil(t, result, fmt.Sprintf("%d: %v, %v", i, tc, err)) - } else { - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) - } - } -} - -// Test that transactions exceeding gas limits fail -func TestMaxBlockGasLimits(t *testing.T) { - gasGranted := uint64(10) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) - - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) - default: - panic(r) - } - } - }() - - count := tx.(txTest).Counter - newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") - - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: &abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxGas: 100, - }, - }, - }) - - testCases := []struct { - tx *txTest - numDelivers int - gasUsedPerDeliver uint64 - fail bool - failAfterDeliver int - }{ - {newTxCounter(0, 0), 0, 0, false, 0}, - {newTxCounter(9, 1), 2, 10, false, 0}, - {newTxCounter(10, 0), 3, 10, false, 0}, - {newTxCounter(10, 0), 
10, 10, false, 0}, - {newTxCounter(2, 7), 11, 9, false, 0}, - {newTxCounter(10, 0), 10, 10, false, 0}, // hit the limit but pass - - {newTxCounter(10, 0), 11, 10, true, 10}, - {newTxCounter(10, 0), 15, 10, true, 10}, - {newTxCounter(9, 0), 12, 9, true, 11}, // fly past the limit - } - - for i, tc := range testCases { - tx := tc.tx - - // reset the block gas - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // execute the transaction multiple times - for j := 0; j < tc.numDelivers; j++ { - _, result, err := app.Deliver(aminoTxEncoder(), tx) - - ctx := app.getState(runTxModeDeliver).ctx - - // check for failed transactions - if tc.fail && (j+1) > tc.failAfterDeliver { - require.Error(t, err, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - require.Nil(t, result, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) - require.True(t, ctx.BlockGasMeter().IsOutOfGas()) - } else { - // check gas used and wanted - blockGasUsed := ctx.BlockGasMeter().GasConsumed() - expBlockGasUsed := tc.gasUsedPerDeliver * uint64(j+1) - require.Equal( - t, expBlockGasUsed, blockGasUsed, - fmt.Sprintf("%d,%d: %v, %v, %v, %v", i, j, tc, expBlockGasUsed, blockGasUsed, result), - ) - - require.NotNil(t, result, fmt.Sprintf("tc #%d; currDeliver: %d, result: %v, err: %s", i, j, result, err)) - require.False(t, ctx.BlockGasMeter().IsPastLimit()) - } - } - } -} - -// Test custom panic handling within app.DeliverTx method -func TestCustomRunTxPanicHandler(t *testing.T) { - const customPanicMsg = "test panic" - anteErr := sdkerrors.Register("fakeModule", 100500, "fakeError") - - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - panic(sdkerrors.Wrap(anteErr, "anteHandler")) - }) - } - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - app.AddRunTxRecoveryHandler(func(recoveryObj interface{}) error { - err, ok := recoveryObj.(error) - if !ok { - return nil - } - - if anteErr.Is(err) { - panic(customPanicMsg) - } else { - return nil - } - }) - - // Transaction should panic with custom handler above - { - tx := newTxCounter(0, 0) - - require.PanicsWithValue(t, customPanicMsg, func() { app.Deliver(aminoTxEncoder(), tx) }) - } -} - -func TestBaseAppAnteHandler(t *testing.T) { - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) - } - - deliverKey := []byte("deliver-key") - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - bapp.Router().AddRoute(r) - } - - cdc := codec.NewLegacyAmino() - app := setupBaseApp(t, anteOpt, routerOpt) - - app.InitChain(abci.RequestInitChain{}) - registerTestCodec(cdc) - - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // execute a tx that will fail ante handler execution - // - // NOTE: State should not be mutated here. 
This will be implicitly checked by - // the next txs ante handler execution (anteHandlerTxTest). - tx := newTxCounter(0, 0) - tx.setFailOnAnte(true) - txBytes, err := cdc.Marshal(tx) - require.NoError(t, err) - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.Empty(t, res.Events) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) - - ctx := app.getState(runTxModeDeliver).ctx - store := ctx.KVStore(capKey1) - require.Equal(t, int64(0), getIntFromStore(store, anteKey)) - - // execute at tx that will pass the ante handler (the checkTx state should - // mutate) but will fail the message handler - tx = newTxCounter(0, 0) - tx.setFailOnHandler(true) - - txBytes, err = cdc.Marshal(tx) - require.NoError(t, err) - - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - // should emit ante event - require.NotEmpty(t, res.Events) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) - - ctx = app.getState(runTxModeDeliver).ctx - store = ctx.KVStore(capKey1) - require.Equal(t, int64(1), getIntFromStore(store, anteKey)) - require.Equal(t, int64(0), getIntFromStore(store, deliverKey)) - - // execute a successful ante handler and message execution where state is - // implicitly checked by previous tx executions - tx = newTxCounter(1, 0) - - txBytes, err = cdc.Marshal(tx) - require.NoError(t, err) - - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.NotEmpty(t, res.Events) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - - ctx = app.getState(runTxModeDeliver).ctx - store = ctx.KVStore(capKey1) - require.Equal(t, int64(2), getIntFromStore(store, anteKey)) - require.Equal(t, int64(1), getIntFromStore(store, deliverKey)) - - // commit - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() -} - -func TestGasConsumptionBadTx(t *testing.T) { - gasWanted := uint64(5) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasWanted)) - - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - log := fmt.Sprintf("out of gas in location: %v", rType.Descriptor) - err = sdkerrors.Wrap(sdkerrors.ErrOutOfGas, log) - default: - panic(r) - } - } - }() - - txTest := tx.(txTest) - newCtx.GasMeter().ConsumeGas(uint64(txTest.Counter), "counter-ante") - if txTest.FailOnAnte { - return newCtx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") - } - - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - cdc := codec.NewLegacyAmino() - registerTestCodec(cdc) - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: &abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxGas: 9, - }, - }, - }) - - app.InitChain(abci.RequestInitChain{}) - - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - tx := newTxCounter(5, 0) - tx.setFailOnAnte(true) - txBytes, err := cdc.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) - - // require next tx to fail due to black gas limit - tx = newTxCounter(5, 0) - txBytes, err = 
cdc.Marshal(tx) - require.NoError(t, err) - - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) -} - -// Test that we can only query from the latest committed state. -func TestQuery(t *testing.T) { - key, value := []byte("hello"), []byte("goodbye") - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - store := ctx.KVStore(capKey1) - store.Set(key, value) - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - store := ctx.KVStore(capKey1) - store.Set(key, value) - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - app.InitChain(abci.RequestInitChain{}) - - // NOTE: "/store/key1" tells us KVStore - // and the final "/key" says to use the data as the - // key in the given KVStore ... - query := abci.RequestQuery{ - Path: "/store/key1/key", - Data: key, - } - tx := newTxCounter(0, 0) - - // query is empty before we do anything - res := app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // query is still empty after a CheckTx - _, resTx, err := app.Check(aminoTxEncoder(), tx) - require.NoError(t, err) - require.NotNil(t, resTx) - res = app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // query is still empty after a DeliverTx before we commit - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - _, resTx, err = app.Deliver(aminoTxEncoder(), tx) - require.NoError(t, err) - require.NotNil(t, resTx) - res = app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // query returns correct value after Commit - app.Commit() - res = app.Query(query) - require.Equal(t, value, res.Value) -} - -func TestGRPCQuery(t *testing.T) { - grpcQueryOpt := func(bapp *BaseApp) { - testdata.RegisterQueryServer( - bapp.GRPCQueryRouter(), - testdata.QueryImpl{}, - ) - } - - app := setupBaseApp(t, grpcQueryOpt) - - app.InitChain(abci.RequestInitChain{}) - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() - - req := testdata.SayHelloRequest{Name: "foo"} - reqBz, err := req.Marshal() - require.NoError(t, err) - - reqQuery := abci.RequestQuery{ - Data: reqBz, - Path: "/testdata.Query/SayHello", - } - - resQuery := app.Query(reqQuery) - - require.Equal(t, abci.CodeTypeOK, resQuery.Code, resQuery) - - var res testdata.SayHelloResponse - err = res.Unmarshal(resQuery.Value) - require.NoError(t, err) - require.Equal(t, "Hello foo!", res.Greeting) -} - -// Test p2p filter queries -func TestP2PQuery(t *testing.T) { - addrPeerFilterOpt := func(bapp *BaseApp) { - bapp.SetAddrPeerFilter(func(addrport string) abci.ResponseQuery { - require.Equal(t, "1.1.1.1:8000", addrport) - return abci.ResponseQuery{Code: uint32(3)} - }) - } - - idPeerFilterOpt := func(bapp *BaseApp) { - bapp.SetIDPeerFilter(func(id string) abci.ResponseQuery { - require.Equal(t, "testid", id) - return abci.ResponseQuery{Code: uint32(4)} - }) - } - - app := setupBaseApp(t, addrPeerFilterOpt, idPeerFilterOpt) - - addrQuery := abci.RequestQuery{ - Path: "/p2p/filter/addr/1.1.1.1:8000", - } - res := app.Query(addrQuery) - require.Equal(t, uint32(3), res.Code) - - idQuery := abci.RequestQuery{ - Path: "/p2p/filter/id/testid", - } - res = app.Query(idQuery) - require.Equal(t, uint32(4), res.Code) 
-} - -func TestGetMaximumBlockGas(t *testing.T) { - app := setupBaseApp(t) - app.InitChain(abci.RequestInitChain{}) - ctx := app.NewContext(true, tmproto.Header{}) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 0}}) - require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -1}}) - require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 5000000}}) - require.Equal(t, uint64(5000000), app.getMaximumBlockGas(ctx)) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -5000000}}) - require.Panics(t, func() { app.getMaximumBlockGas(ctx) }) -} - -func TestListSnapshots(t *testing.T) { - app, teardown := setupBaseAppWithSnapshots(t, 5, 4) - defer teardown() - - resp := app.ListSnapshots(abci.RequestListSnapshots{}) - for _, s := range resp.Snapshots { - assert.NotEmpty(t, s.Hash) - assert.NotEmpty(t, s.Metadata) - s.Hash = nil - s.Metadata = nil - } - assert.Equal(t, abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ - {Height: 4, Format: 1, Chunks: 2}, - {Height: 2, Format: 1, Chunks: 1}, - }}, resp) -} - -func TestLoadSnapshotChunk(t *testing.T) { - app, teardown := setupBaseAppWithSnapshots(t, 2, 5) - defer teardown() - - testcases := map[string]struct { - height uint64 - format uint32 - chunk uint32 - expectEmpty bool - }{ - "Existing snapshot": {2, 1, 1, false}, - "Missing height": {100, 1, 1, true}, - "Missing format": {2, 2, 1, true}, - "Missing chunk": {2, 1, 9, true}, - "Zero height": {0, 1, 1, true}, - "Zero format": {2, 0, 1, true}, - "Zero chunk": {2, 1, 0, false}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - resp := app.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ - Height: tc.height, - Format: tc.format, - Chunk: tc.chunk, - }) - if tc.expectEmpty { - assert.Equal(t, abci.ResponseLoadSnapshotChunk{}, resp) - return - } - assert.NotEmpty(t, resp.Chunk) - }) - } -} - -func TestOfferSnapshot_Errors(t *testing.T) { - // Set up app before test cases, since it's fairly expensive. 
- app, teardown := setupBaseAppWithSnapshots(t, 0, 0) - defer teardown() - - m := snapshottypes.Metadata{ChunkHashes: [][]byte{{1}, {2}, {3}}} - metadata, err := m.Marshal() - require.NoError(t, err) - hash := []byte{1, 2, 3} - - testcases := map[string]struct { - snapshot *abci.Snapshot - result abci.ResponseOfferSnapshot_Result - }{ - "nil snapshot": {nil, abci.ResponseOfferSnapshot_REJECT}, - "invalid format": {&abci.Snapshot{ - Height: 1, Format: 9, Chunks: 3, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT_FORMAT}, - "incorrect chunk count": {&abci.Snapshot{ - Height: 1, Format: 1, Chunks: 2, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT}, - "no chunks": {&abci.Snapshot{ - Height: 1, Format: 1, Chunks: 0, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT}, - "invalid metadata serialization": {&abci.Snapshot{ - Height: 1, Format: 1, Chunks: 0, Hash: hash, Metadata: []byte{3, 1, 4}, - }, abci.ResponseOfferSnapshot_REJECT}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: tc.snapshot}) - assert.Equal(t, tc.result, resp.Result) - }) - } - - // Offering a snapshot after one has been accepted should error - resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ - Height: 1, - Format: snapshottypes.CurrentFormat, - Chunks: 3, - Hash: []byte{1, 2, 3}, - Metadata: metadata, - }}) - require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, resp) - - resp = app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ - Height: 2, - Format: snapshottypes.CurrentFormat, - Chunks: 3, - Hash: []byte{1, 2, 3}, - Metadata: metadata, - }}) - require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, resp) -} - -func TestApplySnapshotChunk(t *testing.T) { - source, teardown := setupBaseAppWithSnapshots(t, 4, 10) - defer teardown() - - target, teardown := setupBaseAppWithSnapshots(t, 0, 0) - defer teardown() - - // Fetch latest snapshot to restore - respList := source.ListSnapshots(abci.RequestListSnapshots{}) - require.NotEmpty(t, respList.Snapshots) - snapshot := respList.Snapshots[0] - - // Make sure the snapshot has at least 3 chunks - require.GreaterOrEqual(t, snapshot.Chunks, uint32(3), "Not enough snapshot chunks") - - // Begin a snapshot restoration in the target - respOffer := target.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: snapshot}) - require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, respOffer) - - // We should be able to pass an invalid chunk and get a verify failure, before reapplying it. 
- respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ - Index: 0, - Chunk: []byte{9}, - Sender: "sender", - }) - require.Equal(t, abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_RETRY, - RefetchChunks: []uint32{0}, - RejectSenders: []string{"sender"}, - }, respApply) - - // Fetch each chunk from the source and apply it to the target - for index := uint32(0); index < snapshot.Chunks; index++ { - respChunk := source.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ - Height: snapshot.Height, - Format: snapshot.Format, - Chunk: index, - }) - require.NotNil(t, respChunk.Chunk) - respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ - Index: index, - Chunk: respChunk.Chunk, - }) - require.Equal(t, abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_ACCEPT, - }, respApply) - } - - // The target should now have the same hash as the source - assert.Equal(t, source.LastCommitID(), target.LastCommitID()) -} - -// NOTE: represents a new custom router for testing purposes of WithRouter() -type testCustomRouter struct { - routes sync.Map -} - -func (rtr *testCustomRouter) AddRoute(route sdk.Route) sdk.Router { - rtr.routes.Store(route.Path(), route.Handler()) - return rtr -} - -func (rtr *testCustomRouter) Route(ctx sdk.Context, path string) sdk.Handler { - if v, ok := rtr.routes.Load(path); ok { - if h, ok := v.(sdk.Handler); ok { - return h - } - } - return nil -} - -func TestWithRouter(t *testing.T) { - // test increments in the ante - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - - // test increments in the handler - deliverKey := []byte("deliver-key") - routerOpt := func(bapp *BaseApp) { - bapp.SetRouter(&testCustomRouter{routes: sync.Map{}}) - r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - nBlocks := 3 - txPerHeight := 5 - - for blockN := 0; blockN < nBlocks; blockN++ { - header := tmproto.Header{Height: int64(blockN) + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - for i := 0; i < txPerHeight; i++ { - counter := int64(blockN*txPerHeight + i) - tx := newTxCounter(counter, counter) - - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - } - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - } -} - -func TestBaseApp_EndBlock(t *testing.T) { - db := dbm.NewMemDB() - name := t.Name() - logger := defaultLogger() - - cp := &abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxGas: 5000000, - }, - } - - app := NewBaseApp(name, logger, db, nil) - app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: cp, - }) - - app.SetEndBlocker(func(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { - return abci.ResponseEndBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{ - {Power: 100}, - }, - } - }) - app.Seal() - - res := app.EndBlock(abci.RequestEndBlock{}) - require.Len(t, res.GetValidatorUpdates(), 1) - require.Equal(t, int64(100), res.GetValidatorUpdates()[0].Power) - require.Equal(t, cp.Block.MaxGas, res.ConsensusParamUpdates.Block.MaxGas) + assert.Equal(t, expected, resp) } 
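The trimmed `TestListSnapshots` above now ends in `assert.Equal(t, expected, resp)`. A sketch of what `expected` presumably holds, assuming it mirrors the inline literal the removed lines compared against (with `Hash` and `Metadata` cleared first, as before):

```go
// Hypothetical reconstruction, not part of the patch: the expected snapshot
// listing for an app set up with setupBaseAppWithSnapshots(t, 5, 4).
expected := abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{
	{Height: 4, Format: 1, Chunks: 2},
	{Height: 2, Format: 1, Chunks: 1},
}}
```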
diff --git a/baseapp/deliver_tx_test.go b/baseapp/deliver_tx_test.go new file mode 100644 index 00000000000..c0126f9964f --- /dev/null +++ b/baseapp/deliver_tx_test.go @@ -0,0 +1,1840 @@ +package baseapp + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/rand" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/gogo/protobuf/jsonpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + dbm "github.com/tendermint/tm-db" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/snapshots" + snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" + "github.com/cosmos/cosmos-sdk/store/rootmulti" + store "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func TestLoadSnapshotChunk(t *testing.T) { + app, teardown := setupBaseAppWithSnapshots(t, 2, 5) + defer teardown() + + testcases := map[string]struct { + height uint64 + format uint32 + chunk uint32 + expectEmpty bool + }{ + "Existing snapshot": {2, 1, 1, false}, + "Missing height": {100, 1, 1, true}, + "Missing format": {2, 2, 1, true}, + "Missing chunk": {2, 1, 9, true}, + "Zero height": {0, 1, 1, true}, + "Zero format": {2, 0, 1, true}, + "Zero chunk": {2, 1, 0, false}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + resp := app.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ + Height: tc.height, + Format: tc.format, + Chunk: tc.chunk, + }) + if tc.expectEmpty { + assert.Equal(t, abci.ResponseLoadSnapshotChunk{}, resp) + return + } + assert.NotEmpty(t, resp.Chunk) + }) + } +} + +func TestOfferSnapshot_Errors(t *testing.T) { + // Set up app before test cases, since it's fairly expensive. 
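+ // (Assuming the two counts are blocks and txs per block, (0, 0) commits no
+ // blocks, so no snapshots exist and every offer below starts from a clean
+ // restore state.)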
+ app, teardown := setupBaseAppWithSnapshots(t, 0, 0) + defer teardown() + + m := snapshottypes.Metadata{ChunkHashes: [][]byte{{1}, {2}, {3}}} + metadata, err := m.Marshal() + require.NoError(t, err) + hash := []byte{1, 2, 3} + + testcases := map[string]struct { + snapshot *abci.Snapshot + result abci.ResponseOfferSnapshot_Result + }{ + "nil snapshot": {nil, abci.ResponseOfferSnapshot_REJECT}, + "invalid format": {&abci.Snapshot{ + Height: 1, Format: 9, Chunks: 3, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT_FORMAT}, + "incorrect chunk count": {&abci.Snapshot{ + Height: 1, Format: 1, Chunks: 2, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT}, + "no chunks": {&abci.Snapshot{ + Height: 1, Format: 1, Chunks: 0, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT}, + "invalid metadata serialization": {&abci.Snapshot{ + Height: 1, Format: 1, Chunks: 0, Hash: hash, Metadata: []byte{3, 1, 4}, + }, abci.ResponseOfferSnapshot_REJECT}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: tc.snapshot}) + assert.Equal(t, tc.result, resp.Result) + }) + } + + // Offering a snapshot after one has been accepted should error + resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ + Height: 1, + Format: snapshottypes.CurrentFormat, + Chunks: 3, + Hash: []byte{1, 2, 3}, + Metadata: metadata, + }}) + require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, resp) + + resp = app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ + Height: 2, + Format: snapshottypes.CurrentFormat, + Chunks: 3, + Hash: []byte{1, 2, 3}, + Metadata: metadata, + }}) + require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, resp) +} + +func TestApplySnapshotChunk(t *testing.T) { + source, teardown := setupBaseAppWithSnapshots(t, 4, 10) + defer teardown() + + target, teardown := setupBaseAppWithSnapshots(t, 0, 0) + defer teardown() + + // Fetch latest snapshot to restore + respList := source.ListSnapshots(abci.RequestListSnapshots{}) + require.NotEmpty(t, respList.Snapshots) + snapshot := respList.Snapshots[0] + + // Make sure the snapshot has at least 3 chunks + require.GreaterOrEqual(t, snapshot.Chunks, uint32(3), "Not enough snapshot chunks") + + // Begin a snapshot restoration in the target + respOffer := target.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: snapshot}) + require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, respOffer) + + // We should be able to pass an invalid chunk and get a verify failure, before reapplying it. 
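+ // (A RETRY result asks the caller to refetch the chunks listed in
+ // RefetchChunks and to stop accepting chunks from the RejectSenders peers
+ // before trying again.)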
+ respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ + Index: 0, + Chunk: []byte{9}, + Sender: "sender", + }) + require.Equal(t, abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_RETRY, + RefetchChunks: []uint32{0}, + RejectSenders: []string{"sender"}, + }, respApply) + + // Fetch each chunk from the source and apply it to the target + for index := uint32(0); index < snapshot.Chunks; index++ { + respChunk := source.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ + Height: snapshot.Height, + Format: snapshot.Format, + Chunk: index, + }) + require.NotNil(t, respChunk.Chunk) + respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ + Index: index, + Chunk: respChunk.Chunk, + }) + require.Equal(t, abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + }, respApply) + } + + // The target should now have the same hash as the source + assert.Equal(t, source.LastCommitID(), target.LastCommitID()) +} + +// NOTE: represents a new custom router for testing purposes of WithRouter() +type testCustomRouter struct { + routes sync.Map +} + +func (rtr *testCustomRouter) AddRoute(route sdk.Route) sdk.Router { + rtr.routes.Store(route.Path(), route.Handler()) + return rtr +} + +func (rtr *testCustomRouter) Route(ctx sdk.Context, path string) sdk.Handler { + if v, ok := rtr.routes.Load(path); ok { + if h, ok := v.(sdk.Handler); ok { + return h + } + } + return nil +} + +func TestWithRouter(t *testing.T) { + // test increments in the ante + anteKey := []byte("ante-key") + anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + + // test increments in the handler + deliverKey := []byte("deliver-key") + routerOpt := func(bapp *BaseApp) { + bapp.SetRouter(&testCustomRouter{routes: sync.Map{}}) + r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + app.InitChain(abci.RequestInitChain{}) + + // Create same codec used in txDecoder + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + + nBlocks := 3 + txPerHeight := 5 + + for blockN := 0; blockN < nBlocks; blockN++ { + header := tmproto.Header{Height: int64(blockN) + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + for i := 0; i < txPerHeight; i++ { + counter := int64(blockN*txPerHeight + i) + tx := newTxCounter(counter, counter) + + txBytes, err := codec.Marshal(tx) + require.NoError(t, err) + + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + } + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + } +} + +func TestBaseApp_EndBlock(t *testing.T) { + db := dbm.NewMemDB() + name := t.Name() + logger := defaultLogger() + + cp := &abci.ConsensusParams{ + Block: &abci.BlockParams{ + MaxGas: 5000000, + }, + } + + app := NewBaseApp(name, logger, db, nil) + app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + app.InitChain(abci.RequestInitChain{ + ConsensusParams: cp, + }) + + app.SetEndBlocker(func(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { + return abci.ResponseEndBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{ + {Power: 100}, + }, + } + }) + app.Seal() + + res := app.EndBlock(abci.RequestEndBlock{}) + require.Len(t, res.GetValidatorUpdates(), 1) + require.Equal(t, int64(100), res.GetValidatorUpdates()[0].Power) + require.Equal(t, cp.Block.MaxGas, res.ConsensusParamUpdates.Block.MaxGas) +} + +// Test that we can only query 
from the latest committed state. +func TestQuery(t *testing.T) { + key, value := []byte("hello"), []byte("goodbye") + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + store := ctx.KVStore(capKey1) + store.Set(key, value) + return + }) + } + + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + store := ctx.KVStore(capKey1) + store.Set(key, value) + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + app.InitChain(abci.RequestInitChain{}) + + // NOTE: "/store/key1" tells us KVStore + // and the final "/key" says to use the data as the + // key in the given KVStore ... + query := abci.RequestQuery{ + Path: "/store/key1/key", + Data: key, + } + tx := newTxCounter(0, 0) + + // query is empty before we do anything + res := app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // query is still empty after a CheckTx + _, resTx, err := app.Check(aminoTxEncoder(), tx) + require.NoError(t, err) + require.NotNil(t, resTx) + res = app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // query is still empty after a DeliverTx before we commit + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + _, resTx, err = app.Deliver(aminoTxEncoder(), tx) + require.NoError(t, err) + require.NotNil(t, resTx) + res = app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // query returns correct value after Commit + app.Commit() + res = app.Query(query) + require.Equal(t, value, res.Value) +} + +func TestGRPCQuery(t *testing.T) { + grpcQueryOpt := func(bapp *BaseApp) { + testdata.RegisterQueryServer( + bapp.GRPCQueryRouter(), + testdata.QueryImpl{}, + ) + } + + app := setupBaseApp(t, grpcQueryOpt) + + app.InitChain(abci.RequestInitChain{}) + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + app.Commit() + + req := testdata.SayHelloRequest{Name: "foo"} + reqBz, err := req.Marshal() + require.NoError(t, err) + + reqQuery := abci.RequestQuery{ + Data: reqBz, + Path: "/testdata.Query/SayHello", + } + + resQuery := app.Query(reqQuery) + + require.Equal(t, abci.CodeTypeOK, resQuery.Code, resQuery) + + var res testdata.SayHelloResponse + err = res.Unmarshal(resQuery.Value) + require.NoError(t, err) + require.Equal(t, "Hello foo!", res.Greeting) +} + +// Test p2p filter queries +func TestP2PQuery(t *testing.T) { + addrPeerFilterOpt := func(bapp *BaseApp) { + bapp.SetAddrPeerFilter(func(addrport string) abci.ResponseQuery { + require.Equal(t, "1.1.1.1:8000", addrport) + return abci.ResponseQuery{Code: uint32(3)} + }) + } + + idPeerFilterOpt := func(bapp *BaseApp) { + bapp.SetIDPeerFilter(func(id string) abci.ResponseQuery { + require.Equal(t, "testid", id) + return abci.ResponseQuery{Code: uint32(4)} + }) + } + + app := setupBaseApp(t, addrPeerFilterOpt, idPeerFilterOpt) + + addrQuery := abci.RequestQuery{ + Path: "/p2p/filter/addr/1.1.1.1:8000", + } + res := app.Query(addrQuery) + require.Equal(t, uint32(3), res.Code) + + idQuery := abci.RequestQuery{ + Path: "/p2p/filter/id/testid", + } + res = app.Query(idQuery) + require.Equal(t, uint32(4), res.Code) +} + +// One call to DeliverTx should process all the messages, in order. 
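+//
+// In sketch form, using this file's helpers, the txs under test look like:
+//
+//	tx := newTxCounter(0, 0, 1, 2)            // ante counter 0; msg counters 0, 1, 2
+//	tx.Msgs = append(tx.Msgs, msgCounter2{0}) // and later, msgs on a second route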
+func TestMultiMsgDeliverTx(t *testing.T) {
+ // increment the tx counter
+ anteKey := []byte("ante-key")
+ anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) }
+
+ // increment the msg counter
+ deliverKey := []byte("deliver-key")
+ deliverKey2 := []byte("deliver-key2")
+ routerOpt := func(bapp *BaseApp) {
+ r1 := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey))
+ r2 := sdk.NewRoute(routeMsgCounter2, handlerMsgCounter(t, capKey1, deliverKey2))
+ bapp.Router().AddRoute(r1)
+ bapp.Router().AddRoute(r2)
+ }
+
+ app := setupBaseApp(t, anteOpt, routerOpt)
+
+ // Create same codec used in txDecoder
+ codec := codec.NewLegacyAmino()
+ registerTestCodec(codec)
+
+ // run a multi-msg tx
+ // with all msgs the same route
+
+ header := tmproto.Header{Height: 1}
+ app.BeginBlock(abci.RequestBeginBlock{Header: header})
+ tx := newTxCounter(0, 0, 1, 2)
+ txBytes, err := codec.Marshal(tx)
+ require.NoError(t, err)
+ res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
+ require.True(t, res.IsOK(), fmt.Sprintf("%v", res))
+
+ store := app.deliverState.ctx.KVStore(capKey1)
+
+ // tx counter only incremented once
+ txCounter := getIntFromStore(store, anteKey)
+ require.Equal(t, int64(1), txCounter)
+
+ // msg counter incremented three times
+ msgCounter := getIntFromStore(store, deliverKey)
+ require.Equal(t, int64(3), msgCounter)
+
+ // now run a tx that also carries msgs routed to msgCounter2
+
+ tx = newTxCounter(1, 3)
+ tx.Msgs = append(tx.Msgs, msgCounter2{0})
+ tx.Msgs = append(tx.Msgs, msgCounter2{1})
+ txBytes, err = codec.Marshal(tx)
+ require.NoError(t, err)
+ res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
+ require.True(t, res.IsOK(), fmt.Sprintf("%v", res))
+
+ store = app.deliverState.ctx.KVStore(capKey1)
+
+ // tx counter only incremented once
+ txCounter = getIntFromStore(store, anteKey)
+ require.Equal(t, int64(2), txCounter)
+
+ // original counter increments by one
+ // new counter increments by two
+ msgCounter = getIntFromStore(store, deliverKey)
+ require.Equal(t, int64(4), msgCounter)
+ msgCounter2 := getIntFromStore(store, deliverKey2)
+ require.Equal(t, int64(2), msgCounter2)
+}
+
+// Interleave calls to Check and Deliver and ensure
+// that there is no cross-talk. Check sees the results of the previous Check calls
+// and Deliver sees the results of the previous Deliver calls, but they don't see each other.
+func TestConcurrentCheckDeliver(t *testing.T) {
+ // TODO
+}
+
+// Simulate a transaction that consumes gas, and compute the gas used.
+// Simulate() and Query("/app/simulate", txBytes) should give
+// the same results.
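+//
+// The query path, in sketch form (jsonpb and strings are imported above):
+//
+//	queryResult := app.Query(abci.RequestQuery{Path: "/app/simulate", Data: txBytes})
+//	var simRes sdk.SimulationResponse
+//	err := jsonpb.Unmarshal(strings.NewReader(string(queryResult.Value)), &simRes)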
+func TestSimulateTx(t *testing.T) { + gasConsumed := uint64(5) + + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasConsumed)) + return + }) + } + + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + ctx.GasMeter().ConsumeGas(gasConsumed, "test") + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + app.InitChain(abci.RequestInitChain{}) + + // Create same codec used in txDecoder + cdc := codec.NewLegacyAmino() + registerTestCodec(cdc) + + nBlocks := 3 + for blockN := 0; blockN < nBlocks; blockN++ { + count := int64(blockN + 1) + header := tmproto.Header{Height: count} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + tx := newTxCounter(count, count) + txBytes, err := cdc.Marshal(tx) + require.Nil(t, err) + + // simulate a message, check gas reported + gInfo, result, err := app.Simulate(txBytes) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, gasConsumed, gInfo.GasUsed) + + // simulate again, same result + gInfo, result, err = app.Simulate(txBytes) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, gasConsumed, gInfo.GasUsed) + + // simulate by calling Query with encoded tx + query := abci.RequestQuery{ + Path: "/app/simulate", + Data: txBytes, + } + queryResult := app.Query(query) + require.True(t, queryResult.IsOK(), queryResult.Log) + + var simRes sdk.SimulationResponse + require.NoError(t, jsonpb.Unmarshal(strings.NewReader(string(queryResult.Value)), &simRes)) + + require.Equal(t, gInfo, simRes.GasInfo) + require.Equal(t, result.Log, simRes.Result.Log) + require.Equal(t, result.Events, simRes.Result.Events) + require.True(t, bytes.Equal(result.Data, simRes.Result.Data)) + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + } +} + +func TestRunInvalidTransaction(t *testing.T) { + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + return + }) + } + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + // transaction with no messages + { + emptyTx := &txTest{} + _, result, err := app.Deliver(aminoTxEncoder(), emptyTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrInvalidRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrInvalidRequest.ABCICode(), code, err) + } + + // transaction where ValidateBasic fails + { + testCases := []struct { + tx *txTest + fail bool + }{ + {newTxCounter(0, 0), false}, + {newTxCounter(-1, 0), false}, + {newTxCounter(100, 100), false}, + {newTxCounter(100, 5, 4, 3, 2, 1), false}, + + {newTxCounter(0, -1), true}, + {newTxCounter(0, 1, -2), true}, + {newTxCounter(0, 1, 2, -10, 5), true}, + } + + for _, testCase := range testCases { + tx := testCase.tx + _, result, err := app.Deliver(aminoTxEncoder(), tx) + + if testCase.fail { + require.Error(t, err) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, 
sdkerrors.ErrInvalidSequence.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrInvalidSequence.ABCICode(), code, err) + } else { + require.NotNil(t, result) + } + } + } + + // transaction with no known route + { + unknownRouteTx := txTest{[]sdk.Msg{msgNoRoute{}}, 0, false} + _, result, err := app.Deliver(aminoTxEncoder(), unknownRouteTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) + + unknownRouteTx = txTest{[]sdk.Msg{msgCounter{}, msgNoRoute{}}, 0, false} + _, result, err = app.Deliver(aminoTxEncoder(), unknownRouteTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ = sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) + } + + // Transaction with an unregistered message + { + tx := newTxCounter(0, 0) + tx.Msgs = append(tx.Msgs, msgNoDecode{}) + + // new codec so we can encode the tx, but we shouldn't be able to decode + newCdc := codec.NewLegacyAmino() + registerTestCodec(newCdc) + newCdc.RegisterConcrete(&msgNoDecode{}, "cosmos-sdk/baseapp/msgNoDecode", nil) + + txBytes, err := newCdc.Marshal(tx) + require.NoError(t, err) + + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), res.Code) + require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), res.Codespace) + } +} + +// Test that transactions exceeding gas limits fail +func TestTxGasLimits(t *testing.T) { + gasGranted := uint64(10) + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) + + // AnteHandlers must have their own defer/recover in order for the BaseApp + // to know how much gas was used! This is because the GasMeter is created in + // the AnteHandler, but if it panics the context won't be set properly in + // runTx's recover call. 
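+ // (If the panic escaped to runTx's recovery instead, the GasMeter installed
+ // above would not be visible there, and the GasUsed assertions below would
+ // not reflect the gas consumed in the ante handler.)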
+ defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case sdk.ErrorOutOfGas: + err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) + default: + panic(r) + } + } + }() + + count := tx.(txTest).Counter + newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") + + return newCtx, nil + }) + } + + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + count := msg.(*msgCounter).Counter + ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + testCases := []struct { + tx *txTest + gasUsed uint64 + fail bool + }{ + {newTxCounter(0, 0), 0, false}, + {newTxCounter(1, 1), 2, false}, + {newTxCounter(9, 1), 10, false}, + {newTxCounter(1, 9), 10, false}, + {newTxCounter(10, 0), 10, false}, + {newTxCounter(0, 10), 10, false}, + {newTxCounter(0, 8, 2), 10, false}, + {newTxCounter(0, 5, 1, 1, 1, 1, 1), 10, false}, + {newTxCounter(0, 5, 1, 1, 1, 1), 9, false}, + + {newTxCounter(9, 2), 11, true}, + {newTxCounter(2, 9), 11, true}, + {newTxCounter(9, 1, 1), 11, true}, + {newTxCounter(1, 8, 1, 1), 11, true}, + {newTxCounter(11, 0), 11, true}, + {newTxCounter(0, 11), 11, true}, + {newTxCounter(0, 5, 11), 16, true}, + } + + for i, tc := range testCases { + tx := tc.tx + gInfo, result, err := app.Deliver(aminoTxEncoder(), tx) + + // check gas used and wanted + require.Equal(t, tc.gasUsed, gInfo.GasUsed, fmt.Sprintf("tc #%d; gas: %v, result: %v, err: %s", i, gInfo, result, err)) + + // check for out of gas + if !tc.fail { + require.NotNil(t, result, fmt.Sprintf("%d: %v, %v", i, tc, err)) + } else { + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) + } + } +} + +// Test that transactions exceeding gas limits fail +func TestMaxBlockGasLimits(t *testing.T) { + gasGranted := uint64(10) + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) + + defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case sdk.ErrorOutOfGas: + err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) + default: + panic(r) + } + } + }() + + count := tx.(txTest).Counter + newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") + + return + }) + } + + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + count := msg.(*msgCounter).Counter + ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + app.InitChain(abci.RequestInitChain{ + ConsensusParams: &abci.ConsensusParams{ + Block: &abci.BlockParams{ + MaxGas: 100, + }, + }, + }) + + testCases := []struct { + tx *txTest + numDelivers int + gasUsedPerDeliver uint64 + fail bool + failAfterDeliver int + }{ + {newTxCounter(0, 0), 0, 0, false, 0}, + {newTxCounter(9, 1), 2, 10, false, 0}, + {newTxCounter(10, 0), 3, 10, false, 0}, + {newTxCounter(10, 0), 
10, 10, false, 0}, + {newTxCounter(2, 7), 11, 9, false, 0}, + {newTxCounter(10, 0), 10, 10, false, 0}, // hit the limit but pass + + {newTxCounter(10, 0), 11, 10, true, 10}, + {newTxCounter(10, 0), 15, 10, true, 10}, + {newTxCounter(9, 0), 12, 9, true, 11}, // fly past the limit + } + + for i, tc := range testCases { + tx := tc.tx + + // reset the block gas + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + // execute the transaction multiple times + for j := 0; j < tc.numDelivers; j++ { + _, result, err := app.Deliver(aminoTxEncoder(), tx) + + ctx := app.getState(runTxModeDeliver).ctx + + // check for failed transactions + if tc.fail && (j+1) > tc.failAfterDeliver { + require.Error(t, err, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) + require.Nil(t, result, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) + require.True(t, ctx.BlockGasMeter().IsOutOfGas()) + } else { + // check gas used and wanted + blockGasUsed := ctx.BlockGasMeter().GasConsumed() + expBlockGasUsed := tc.gasUsedPerDeliver * uint64(j+1) + require.Equal( + t, expBlockGasUsed, blockGasUsed, + fmt.Sprintf("%d,%d: %v, %v, %v, %v", i, j, tc, expBlockGasUsed, blockGasUsed, result), + ) + + require.NotNil(t, result, fmt.Sprintf("tc #%d; currDeliver: %d, result: %v, err: %s", i, j, result, err)) + require.False(t, ctx.BlockGasMeter().IsPastLimit()) + } + } + } +} + +// Test custom panic handling within app.DeliverTx method +func TestCustomRunTxPanicHandler(t *testing.T) { + const customPanicMsg = "test panic" + anteErr := sdkerrors.Register("fakeModule", 100500, "fakeError") + + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + panic(sdkerrors.Wrap(anteErr, "anteHandler")) + }) + } + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + return &sdk.Result{}, nil + }) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + app.AddRunTxRecoveryHandler(func(recoveryObj interface{}) error { + err, ok := recoveryObj.(error) + if !ok { + return nil + } + + if anteErr.Is(err) { + panic(customPanicMsg) + } else { + return nil + } + }) + + // Transaction should panic with custom handler above + { + tx := newTxCounter(0, 0) + + require.PanicsWithValue(t, customPanicMsg, func() { app.Deliver(aminoTxEncoder(), tx) }) + } +} + +func TestBaseAppAnteHandler(t *testing.T) { + anteKey := []byte("ante-key") + anteOpt := func(bapp *BaseApp) { + bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) + } + + deliverKey := []byte("deliver-key") + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) + bapp.Router().AddRoute(r) + } + + cdc := codec.NewLegacyAmino() + app := setupBaseApp(t, anteOpt, routerOpt) + + app.InitChain(abci.RequestInitChain{}) + registerTestCodec(cdc) + + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + // execute a tx that will fail ante handler execution + // + // NOTE: State should not be mutated here. 
This will be implicitly checked by
+ // the next tx's ante handler execution (anteHandlerTxTest).
+ tx := newTxCounter(0, 0)
+ tx.setFailOnAnte(true)
+ txBytes, err := cdc.Marshal(tx)
+ require.NoError(t, err)
+ res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
+ require.Empty(t, res.Events)
+ require.False(t, res.IsOK(), fmt.Sprintf("%v", res))
+
+ ctx := app.getState(runTxModeDeliver).ctx
+ store := ctx.KVStore(capKey1)
+ require.Equal(t, int64(0), getIntFromStore(store, anteKey))
+
+ // execute a tx that will pass the ante handler (the checkTx state should
+ // mutate) but will fail the message handler
+ tx = newTxCounter(0, 0)
+ tx.setFailOnHandler(true)
+
+ txBytes, err = cdc.Marshal(tx)
+ require.NoError(t, err)
+
+ res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
+ // should emit ante event
+ require.NotEmpty(t, res.Events)
+ require.False(t, res.IsOK(), fmt.Sprintf("%v", res))
+
+ ctx = app.getState(runTxModeDeliver).ctx
+ store = ctx.KVStore(capKey1)
+ require.Equal(t, int64(1), getIntFromStore(store, anteKey))
+ require.Equal(t, int64(0), getIntFromStore(store, deliverKey))
+
+ // execute a successful ante handler and message execution where state is
+ // implicitly checked by previous tx executions
+ tx = newTxCounter(1, 0)
+
+ txBytes, err = cdc.Marshal(tx)
+ require.NoError(t, err)
+
+ res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
+ require.NotEmpty(t, res.Events)
+ require.True(t, res.IsOK(), fmt.Sprintf("%v", res))
+
+ ctx = app.getState(runTxModeDeliver).ctx
+ store = ctx.KVStore(capKey1)
+ require.Equal(t, int64(2), getIntFromStore(store, anteKey))
+ require.Equal(t, int64(1), getIntFromStore(store, deliverKey))
+
+ // commit
+ app.EndBlock(abci.RequestEndBlock{})
+ app.Commit()
+}
+
+func TestGasConsumptionBadTx(t *testing.T) {
+ gasWanted := uint64(5)
+ anteOpt := func(bapp *BaseApp) {
+ bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) {
+ newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasWanted))
+
+ defer func() {
+ if r := recover(); r != nil {
+ switch rType := r.(type) {
+ case sdk.ErrorOutOfGas:
+ log := fmt.Sprintf("out of gas in location: %v", rType.Descriptor)
+ err = sdkerrors.Wrap(sdkerrors.ErrOutOfGas, log)
+ default:
+ panic(r)
+ }
+ }
+ }()
+
+ txTest := tx.(txTest)
+ newCtx.GasMeter().ConsumeGas(uint64(txTest.Counter), "counter-ante")
+ if txTest.FailOnAnte {
+ return newCtx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure")
+ }
+
+ return
+ })
+ }
+
+ routerOpt := func(bapp *BaseApp) {
+ r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
+ count := msg.(*msgCounter).Counter
+ ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler")
+ return &sdk.Result{}, nil
+ })
+ bapp.Router().AddRoute(r)
+ }
+
+ cdc := codec.NewLegacyAmino()
+ registerTestCodec(cdc)
+
+ app := setupBaseApp(t, anteOpt, routerOpt)
+ app.InitChain(abci.RequestInitChain{
+ ConsensusParams: &abci.ConsensusParams{
+ Block: &abci.BlockParams{
+ MaxGas: 9,
+ },
+ },
+ })
+
+ app.InitChain(abci.RequestInitChain{})
+
+ header := tmproto.Header{Height: app.LastBlockHeight() + 1}
+ app.BeginBlock(abci.RequestBeginBlock{Header: header})
+
+ tx := newTxCounter(5, 0)
+ tx.setFailOnAnte(true)
+ txBytes, err := cdc.Marshal(tx)
+ require.NoError(t, err)
+
+ res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
+ require.False(t, res.IsOK(), fmt.Sprintf("%v", res))
+
+ // require next tx to fail due to block gas limit
+ tx = newTxCounter(5, 0)
+ txBytes, err =
cdc.Marshal(tx) + require.NoError(t, err) + + res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) +} + +func TestInitChainer(t *testing.T) { + name := t.Name() + // keep the db and logger ourselves so + // we can reload the same app later + db := dbm.NewMemDB() + logger := defaultLogger() + app := NewBaseApp(name, logger, db, nil) + capKey := sdk.NewKVStoreKey("main") + capKey2 := sdk.NewKVStoreKey("key2") + app.MountStores(capKey, capKey2) + + // set a value in the store on init chain + key, value := []byte("hello"), []byte("goodbye") + var initChainer sdk.InitChainer = func(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { + store := ctx.KVStore(capKey) + store.Set(key, value) + return abci.ResponseInitChain{} + } + + query := abci.RequestQuery{ + Path: "/store/main/key", + Data: key, + } + + // initChainer is nil - nothing happens + app.InitChain(abci.RequestInitChain{}) + res := app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // set initChainer and try again - should see the value + app.SetInitChainer(initChainer) + + // stores are mounted and private members are set - sealing baseapp + err := app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + require.Equal(t, int64(0), app.LastBlockHeight()) + + initChainRes := app.InitChain(abci.RequestInitChain{AppStateBytes: []byte("{}"), ChainId: "test-chain-id"}) // must have valid JSON genesis file, even if empty + + // The AppHash returned by a new chain is the sha256 hash of "". + // $ echo -n '' | sha256sum + // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + require.Equal( + t, + []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, + initChainRes.AppHash, + ) + + // assert that chainID is set correctly in InitChain + chainID := app.deliverState.ctx.ChainID() + require.Equal(t, "test-chain-id", chainID, "ChainID in deliverState not set correctly in InitChain") + + chainID = app.checkState.ctx.ChainID() + require.Equal(t, "test-chain-id", chainID, "ChainID in checkState not set correctly in InitChain") + + app.Commit() + res = app.Query(query) + require.Equal(t, int64(1), app.LastBlockHeight()) + require.Equal(t, value, res.Value) + + // reload app + app = NewBaseApp(name, logger, db, nil) + app.SetInitChainer(initChainer) + app.MountStores(capKey, capKey2) + err = app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + require.Equal(t, int64(1), app.LastBlockHeight()) + + // ensure we can still query after reloading + res = app.Query(query) + require.Equal(t, value, res.Value) + + // commit and ensure we can still query + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + app.Commit() + + res = app.Query(query) + require.Equal(t, value, res.Value) +} + +func TestInitChain_WithInitialHeight(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + logger := defaultLogger() + app := NewBaseApp(name, logger, db, nil) + + app.InitChain( + abci.RequestInitChain{ + InitialHeight: 3, + }, + ) + app.Commit() + + require.Equal(t, int64(3), app.LastBlockHeight()) +} + +func TestBeginBlock_WithInitialHeight(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + logger := defaultLogger() + app := NewBaseApp(name, logger, db, nil) + + app.InitChain( + abci.RequestInitChain{ + 
InitialHeight: 3,
+ },
+ )
+
+ require.PanicsWithError(t, "invalid height: 4; expected: 3", func() {
+ app.BeginBlock(abci.RequestBeginBlock{
+ Header: tmproto.Header{
+ Height: 4,
+ },
+ })
+ })
+
+ app.BeginBlock(abci.RequestBeginBlock{
+ Header: tmproto.Header{
+ Height: 3,
+ },
+ })
+ app.Commit()
+
+ require.Equal(t, int64(3), app.LastBlockHeight())
+}
+
+// Simple tx with a list of Msgs.
+type txTest struct {
+ Msgs []sdk.Msg
+ Counter int64
+ FailOnAnte bool
+}
+
+func (tx *txTest) setFailOnAnte(fail bool) {
+ tx.FailOnAnte = fail
+}
+
+func (tx *txTest) setFailOnHandler(fail bool) {
+ for i, msg := range tx.Msgs {
+ tx.Msgs[i] = msgCounter{msg.(msgCounter).Counter, fail}
+ }
+}
+
+// Implements Tx
+func (tx txTest) GetMsgs() []sdk.Msg { return tx.Msgs }
+func (tx txTest) ValidateBasic() error { return nil }
+
+const (
+ routeMsgCounter = "msgCounter"
+ routeMsgCounter2 = "msgCounter2"
+ routeMsgKeyValue = "msgKeyValue"
+)
+
+// ValidateBasic() fails on negative counters.
+// Otherwise validation is left to the handlers.
+type msgCounter struct {
+ Counter int64
+ FailOnHandler bool
+}
+
+// dummy implementation of proto.Message
+func (msg msgCounter) Reset() {}
+func (msg msgCounter) String() string { return "TODO" }
+func (msg msgCounter) ProtoMessage() {}
+
+// Implements Msg
+func (msg msgCounter) Route() string { return routeMsgCounter }
+func (msg msgCounter) Type() string { return "counter1" }
+func (msg msgCounter) GetSignBytes() []byte { return nil }
+func (msg msgCounter) GetSigners() []sdk.AccAddress { return nil }
+func (msg msgCounter) ValidateBasic() error {
+ if msg.Counter >= 0 {
+ return nil
+ }
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer")
+}
+
+func newTxCounter(counter int64, msgCounters ...int64) *txTest {
+ msgs := make([]sdk.Msg, 0, len(msgCounters))
+ for _, c := range msgCounters {
+ msgs = append(msgs, msgCounter{c, false})
+ }
+
+ return &txTest{msgs, counter, false}
+}
+
+// a msg we don't know how to route
+type msgNoRoute struct {
+ msgCounter
+}
+
+func (tx msgNoRoute) Route() string { return "noroute" }
+
+// a msg we don't know how to decode
+type msgNoDecode struct {
+ msgCounter
+}
+
+func (tx msgNoDecode) Route() string { return routeMsgCounter }
+
+// Another counter msg. Duplicate of msgCounter
+type msgCounter2 struct {
+ Counter int64
+}
+
+// dummy implementation of proto.Message
+func (msg msgCounter2) Reset() {}
+func (msg msgCounter2) String() string { return "TODO" }
+func (msg msgCounter2) ProtoMessage() {}
+
+// Implements Msg
+func (msg msgCounter2) Route() string { return routeMsgCounter2 }
+func (msg msgCounter2) Type() string { return "counter2" }
+func (msg msgCounter2) GetSignBytes() []byte { return nil }
+func (msg msgCounter2) GetSigners() []sdk.AccAddress { return nil }
+func (msg msgCounter2) ValidateBasic() error {
+ if msg.Counter >= 0 {
+ return nil
+ }
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer")
+}
+
+// A msg that sets a key/value pair.
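+// A well-formed instance looks like msgKeyValue{Key: []byte("k"), Value: []byte("v")};
+// ValidateBasic below rejects nil keys and nil values.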
+type msgKeyValue struct { + Key []byte + Value []byte +} + +func (msg msgKeyValue) Reset() {} +func (msg msgKeyValue) String() string { return "TODO" } +func (msg msgKeyValue) ProtoMessage() {} +func (msg msgKeyValue) Route() string { return routeMsgKeyValue } +func (msg msgKeyValue) Type() string { return "keyValue" } +func (msg msgKeyValue) GetSignBytes() []byte { return nil } +func (msg msgKeyValue) GetSigners() []sdk.AccAddress { return nil } +func (msg msgKeyValue) ValidateBasic() error { + if msg.Key == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") + } + if msg.Value == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") + } + return nil +} + +// amino decode +func testTxDecoder(cdc *codec.LegacyAmino) sdk.TxDecoder { + return func(txBytes []byte) (sdk.Tx, error) { + var tx txTest + if len(txBytes) == 0 { + return nil, sdkerrors.Wrap(sdkerrors.ErrTxDecode, "tx bytes are empty") + } + + err := cdc.Unmarshal(txBytes, &tx) + if err != nil { + return nil, sdkerrors.ErrTxDecode + } + + return tx, nil + } +} + +func anteHandlerTxTest(t *testing.T, capKey sdk.StoreKey, storeKey []byte) sdk.AnteHandler { + return func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { + store := ctx.KVStore(capKey) + txTest := tx.(txTest) + + if txTest.FailOnAnte { + return ctx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") + } + + _, err := incrementingCounter(t, store, storeKey, txTest.Counter) + if err != nil { + return ctx, err + } + + ctx.EventManager().EmitEvents( + counterEvent("ante_handler", txTest.Counter), + ) + + return ctx, nil + } +} + +func counterEvent(evType string, msgCount int64) sdk.Events { + return sdk.Events{ + sdk.NewEvent( + evType, + sdk.NewAttribute("update_counter", fmt.Sprintf("%d", msgCount)), + ), + } +} + +func handlerMsgCounter(t *testing.T, capKey sdk.StoreKey, deliverKey []byte) sdk.Handler { + return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + ctx = ctx.WithEventManager(sdk.NewEventManager()) + store := ctx.KVStore(capKey) + var msgCount int64 + + switch m := msg.(type) { + case *msgCounter: + if m.FailOnHandler { + return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") + } + + msgCount = m.Counter + case *msgCounter2: + msgCount = m.Counter + } + + ctx.EventManager().EmitEvents( + counterEvent(sdk.EventTypeMessage, msgCount), + ) + + res, err := incrementingCounter(t, store, deliverKey, msgCount) + if err != nil { + return nil, err + } + + res.Events = ctx.EventManager().Events().ToABCIEvents() + return res, nil + } +} + +func getIntFromStore(store sdk.KVStore, key []byte) int64 { + bz := store.Get(key) + if len(bz) == 0 { + return 0 + } + i, err := binary.ReadVarint(bytes.NewBuffer(bz)) + if err != nil { + panic(err) + } + return i +} + +func setIntOnStore(store sdk.KVStore, key []byte, i int64) { + bz := make([]byte, 8) + n := binary.PutVarint(bz, i) + store.Set(key, bz[:n]) +} + +// check counter matches what's in store. +// increment and store +func incrementingCounter(t *testing.T, store sdk.KVStore, counterKey []byte, counter int64) (*sdk.Result, error) { + storedCounter := getIntFromStore(store, counterKey) + require.Equal(t, storedCounter, counter) + setIntOnStore(store, counterKey, counter+1) + return &sdk.Result{}, nil +} + +//--------------------------------------------------------------------- +// Tx processing - CheckTx, DeliverTx, SimulateTx. 
+// These tests use the serialized tx as input, while most others will use the
+// Check(), Deliver(), Simulate() methods directly.
+// Ensure that Check/Deliver/Simulate work as expected with the store.
+
+// Test that successive CheckTx calls can see each other's effects
+// on the store within a block, and that the CheckTx state
+// gets reset to the latest committed state during Commit.
+func TestCheckTx(t *testing.T) {
+ // This ante handler reads the key and checks that the value matches the current counter.
+ // This ensures changes to the kvstore persist across successive CheckTx.
+ counterKey := []byte("counter-key")
+
+ anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, counterKey)) }
+ routerOpt := func(bapp *BaseApp) {
+ // TODO: can remove this once CheckTx doesn't process msgs.
+ bapp.Router().AddRoute(sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
+ return &sdk.Result{}, nil
+ }))
+ }
+
+ app := setupBaseApp(t, anteOpt, routerOpt)
+
+ nTxs := int64(5)
+ app.InitChain(abci.RequestInitChain{})
+
+ // Create same codec used in txDecoder
+ codec := codec.NewLegacyAmino()
+ registerTestCodec(codec)
+
+ for i := int64(0); i < nTxs; i++ {
+ tx := newTxCounter(i, 0) // no messages
+ txBytes, err := codec.Marshal(tx)
+ require.NoError(t, err)
+ r := app.CheckTx(abci.RequestCheckTx{Tx: txBytes})
+ require.Empty(t, r.GetEvents())
+ require.True(t, r.IsOK(), fmt.Sprintf("%v", r))
+ }
+
+ checkStateStore := app.checkState.ctx.KVStore(capKey1)
+ storedCounter := getIntFromStore(checkStateStore, counterKey)
+
+ // Ensure AnteHandler ran
+ require.Equal(t, nTxs, storedCounter)
+
+ // If a block is committed, CheckTx state should be reset.
+ header := tmproto.Header{Height: 1}
+ app.BeginBlock(abci.RequestBeginBlock{Header: header, Hash: []byte("hash")})
+
+ require.NotNil(t, app.checkState.ctx.BlockGasMeter(), "block gas meter should have been set to checkState")
+ require.NotEmpty(t, app.checkState.ctx.HeaderHash())
+
+ app.EndBlock(abci.RequestEndBlock{})
+ app.Commit()
+
+ checkStateStore = app.checkState.ctx.KVStore(capKey1)
+ storedBytes := checkStateStore.Get(counterKey)
+ require.Nil(t, storedBytes)
+}
+
+// Test that successive DeliverTx calls can see each other's effects
+// on the store, both within and across blocks.
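+//
+// Each tx below carries counter = blockN*txPerHeight + i, so incrementingCounter
+// requires the stored ante and deliver counters to advance by exactly one per tx,
+// including across block boundaries.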
+func TestDeliverTx(t *testing.T) { + // test increments in the ante + anteKey := []byte("ante-key") + anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + + // test increments in the handler + deliverKey := []byte("deliver-key") + routerOpt := func(bapp *BaseApp) { + r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) + bapp.Router().AddRoute(r) + } + + app := setupBaseApp(t, anteOpt, routerOpt) + app.InitChain(abci.RequestInitChain{}) + + // Create same codec used in txDecoder + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + + nBlocks := 3 + txPerHeight := 5 + + for blockN := 0; blockN < nBlocks; blockN++ { + header := tmproto.Header{Height: int64(blockN) + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + for i := 0; i < txPerHeight; i++ { + counter := int64(blockN*txPerHeight + i) + tx := newTxCounter(counter, counter) + + txBytes, err := codec.Marshal(tx) + require.NoError(t, err) + + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + events := res.GetEvents() + require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") + require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") + require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, counter).ToABCIEvents(), map[string]struct{}{})[0], events[2], "msg handler update counter event") + } + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + } +} + +func TestOptionFunction(t *testing.T) { + logger := defaultLogger() + db := dbm.NewMemDB() + bap := NewBaseApp("starting name", logger, db, nil, testChangeNameHelper("new name")) + require.Equal(t, bap.name, "new name", "BaseApp should have had name changed via option function") +} + +func testChangeNameHelper(name string) func(*BaseApp) { + return func(bap *BaseApp) { + bap.name = name + } +} + +// Test that txs can be unmarshalled and read and that +// correct error codes are returned when not +func TestTxDecoder(t *testing.T) { + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + + app := newBaseApp(t.Name()) + tx := newTxCounter(1, 0) + txBytes := codec.MustMarshal(tx) + + dTx, err := app.txDecoder(txBytes) + require.NoError(t, err) + + cTx := dTx.(txTest) + require.Equal(t, tx.Counter, cTx.Counter) +} + +// Test that Info returns the latest committed state. 
+func TestInfo(t *testing.T) { + app := newBaseApp(t.Name()) + + // ----- test an empty response ------- + reqInfo := abci.RequestInfo{} + res := app.Info(reqInfo) + + // should be empty + assert.Equal(t, "", res.Version) + assert.Equal(t, t.Name(), res.GetData()) + assert.Equal(t, int64(0), res.LastBlockHeight) + require.Equal(t, []uint8(nil), res.LastBlockAppHash) + require.Equal(t, app.AppVersion(), res.AppVersion) + // ----- test a proper response ------- + // TODO +} + +func TestBaseAppOptionSeal(t *testing.T) { + app := setupBaseApp(t) + + require.Panics(t, func() { + app.SetName("") + }) + require.Panics(t, func() { + app.SetVersion("") + }) + require.Panics(t, func() { + app.SetDB(nil) + }) + require.Panics(t, func() { + app.SetCMS(nil) + }) + require.Panics(t, func() { + app.SetInitChainer(nil) + }) + require.Panics(t, func() { + app.SetBeginBlocker(nil) + }) + require.Panics(t, func() { + app.SetEndBlocker(nil) + }) + require.Panics(t, func() { + app.SetAnteHandler(nil) + }) + require.Panics(t, func() { + app.SetAddrPeerFilter(nil) + }) + require.Panics(t, func() { + app.SetIDPeerFilter(nil) + }) + require.Panics(t, func() { + app.SetFauxMerkleMode() + }) + require.Panics(t, func() { + app.SetRouter(NewRouter()) + }) +} + +func TestVersionSetterGetter(t *testing.T) { + logger := defaultLogger() + pruningOpt := SetPruning(store.PruneDefault) + db := dbm.NewMemDB() + name := t.Name() + app := NewBaseApp(name, logger, db, nil, pruningOpt) + + require.Equal(t, "", app.Version()) + res := app.Query(abci.RequestQuery{Path: "app/version"}) + require.True(t, res.IsOK()) + require.Equal(t, "", string(res.Value)) + + versionString := "1.0.0" + app.SetVersion(versionString) + require.Equal(t, versionString, app.Version()) + res = app.Query(abci.RequestQuery{Path: "app/version"}) + require.True(t, res.IsOK()) + require.Equal(t, versionString, string(res.Value)) +} + +func TestLoadVersionInvalid(t *testing.T) { + logger := log.NewNopLogger() + pruningOpt := SetPruning(store.PruneNothing) + db := dbm.NewMemDB() + name := t.Name() + app := NewBaseApp(name, logger, db, nil, pruningOpt) + + err := app.LoadLatestVersion() + require.Nil(t, err) + + // require error when loading an invalid version + err = app.LoadVersion(-1) + require.Error(t, err) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + res := app.Commit() + commitID1 := sdk.CommitID{Version: 1, Hash: res.Data} + + // create a new app with the stores mounted under the same cap key + app = NewBaseApp(name, logger, db, nil, pruningOpt) + + // require we can load the latest version + err = app.LoadVersion(1) + require.Nil(t, err) + testLoadVersionHelper(t, app, int64(1), commitID1) + + // require error when loading an invalid version + err = app.LoadVersion(2) + require.Error(t, err) +} + +// simple one store baseapp with data and snapshots. Each tx is 1 MB in size (uncompressed). 
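+// (The 1 MB figure follows from the loop body below: each tx carries 100
+// msgKeyValue msgs whose values are 10000 random bytes, so
+// 100 * 10000 B = 1 MB of payload per tx before encoding overhead.)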
+func setupBaseAppWithSnapshots(t *testing.T, blocks uint, blockTxs int, options ...func(*BaseApp)) (*BaseApp, func()) { + codec := codec.NewLegacyAmino() + registerTestCodec(codec) + routerOpt := func(bapp *BaseApp) { + bapp.Router().AddRoute(sdk.NewRoute(routeMsgKeyValue, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + kv := msg.(*msgKeyValue) + bapp.cms.GetCommitKVStore(capKey2).Set(kv.Key, kv.Value) + return &sdk.Result{}, nil + })) + } + + snapshotInterval := uint64(2) + snapshotTimeout := 1 * time.Minute + snapshotDir, err := os.MkdirTemp("", "baseapp") + require.NoError(t, err) + snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), snapshotDir) + require.NoError(t, err) + teardown := func() { + os.RemoveAll(snapshotDir) + } + + app := setupBaseApp(t, append(options, + SetSnapshotStore(snapshotStore), + SetSnapshotInterval(snapshotInterval), + SetPruning(sdk.PruningOptions{KeepEvery: 1}), + routerOpt)...) + + app.InitChain(abci.RequestInitChain{}) + + r := rand.New(rand.NewSource(3920758213583)) + keyCounter := 0 + for height := int64(1); height <= int64(blocks); height++ { + app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: height}}) + for txNum := 0; txNum < blockTxs; txNum++ { + tx := txTest{Msgs: []sdk.Msg{}} + for msgNum := 0; msgNum < 100; msgNum++ { + key := []byte(fmt.Sprintf("%v", keyCounter)) + value := make([]byte, 10000) + _, err := r.Read(value) + require.NoError(t, err) + tx.Msgs = append(tx.Msgs, msgKeyValue{Key: key, Value: value}) + keyCounter++ + } + txBytes, err := codec.Marshal(tx) + require.NoError(t, err) + resp := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, resp.IsOK(), "%v", resp.String()) + } + app.EndBlock(abci.RequestEndBlock{Height: height}) + app.Commit() + + // Wait for snapshot to be taken, since it happens asynchronously. + if uint64(height)%snapshotInterval == 0 { + start := time.Now() + for { + if time.Since(start) > snapshotTimeout { + t.Errorf("timed out waiting for snapshot after %v", snapshotTimeout) + } + snapshot, err := snapshotStore.Get(uint64(height), snapshottypes.CurrentFormat) + require.NoError(t, err) + if snapshot != nil { + break + } + time.Sleep(100 * time.Millisecond) + } + } + } + + return app, teardown +} + +func TestMountStores(t *testing.T) { + app := setupBaseApp(t) + + // check both stores + store1 := app.cms.GetCommitKVStore(capKey1) + require.NotNil(t, store1) + store2 := app.cms.GetCommitKVStore(capKey2) + require.NotNil(t, store2) +} + +// Test that we can make commits and then reload old versions. +// Test that LoadLatestVersion actually does. 
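+// In sketch form, the reload pattern exercised below is:
+//
+//	app := NewBaseApp(name, logger, db, nil, pruningOpt)
+//	err := app.LoadVersion(1) // reopen the multistore at commit version 1
+//
+// after which LastBlockHeight and LastCommitID should report version 1.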
+func TestLoadVersion(t *testing.T) {
+	logger := defaultLogger()
+	pruningOpt := SetPruning(store.PruneNothing)
+	db := dbm.NewMemDB()
+	name := t.Name()
+	app := NewBaseApp(name, logger, db, nil, pruningOpt)
+
+	// make a cap key and mount the store
+	err := app.LoadLatestVersion() // needed to make stores non-nil
+	require.Nil(t, err)
+
+	emptyCommitID := sdk.CommitID{}
+
+	// fresh store has zero/empty last commit
+	lastHeight := app.LastBlockHeight()
+	lastID := app.LastCommitID()
+	require.Equal(t, int64(0), lastHeight)
+	require.Equal(t, emptyCommitID, lastID)
+
+	// execute a block, collect commit ID
+	header := tmproto.Header{Height: 1}
+	app.BeginBlock(abci.RequestBeginBlock{Header: header})
+	res := app.Commit()
+	commitID1 := sdk.CommitID{Version: 1, Hash: res.Data}
+
+	// execute a block, collect commit ID
+	header = tmproto.Header{Height: 2}
+	app.BeginBlock(abci.RequestBeginBlock{Header: header})
+	res = app.Commit()
+	commitID2 := sdk.CommitID{Version: 2, Hash: res.Data}
+
+	// reload with LoadLatestVersion
+	app = NewBaseApp(name, logger, db, nil, pruningOpt)
+	app.MountStores()
+	err = app.LoadLatestVersion()
+	require.Nil(t, err)
+	testLoadVersionHelper(t, app, int64(2), commitID2)
+
+	// reload with LoadVersion, see if you can commit the same block and get
+	// the same result
+	app = NewBaseApp(name, logger, db, nil, pruningOpt)
+	err = app.LoadVersion(1)
+	require.Nil(t, err)
+	testLoadVersionHelper(t, app, int64(1), commitID1)
+	app.BeginBlock(abci.RequestBeginBlock{Header: header})
+	app.Commit()
+	testLoadVersionHelper(t, app, int64(2), commitID2)
+}
+
+func useDefaultLoader(app *BaseApp) {
+	app.SetStoreLoader(DefaultStoreLoader)
+}
+
+func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) {
+	rs := rootmulti.NewStore(db, log.NewNopLogger())
+	rs.SetPruning(store.PruneNothing)
+	key := sdk.NewKVStoreKey(storeKey)
+	rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil)
+	err := rs.LoadLatestVersion()
+	require.Nil(t, err)
+	require.Equal(t, int64(0), rs.LastCommitID().Version)
+
+	// write some data in substore
+	kv, _ := rs.GetStore(key).(store.KVStore)
+	require.NotNil(t, kv)
+	kv.Set(k, v)
+	commitID := rs.Commit()
+	require.Equal(t, int64(1), commitID.Version)
+}
+
+func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) {
+	rs := rootmulti.NewStore(db, log.NewNopLogger())
+	rs.SetPruning(store.PruneDefault)
+	key := sdk.NewKVStoreKey(storeKey)
+	rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil)
+	err := rs.LoadLatestVersion()
+	require.Nil(t, err)
+	require.Equal(t, ver, rs.LastCommitID().Version)
+
+	// query data in substore
+	kv, _ := rs.GetStore(key).(store.KVStore)
+	require.NotNil(t, kv)
+	require.Equal(t, v, kv.Get(k))
+}
+
+// Test that alternative store loaders can be passed to the baseapp and that
+// the app still loads and commits against the existing stores.
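+// DefaultStoreLoader simply calls LoadLatestVersion on the multistore; a
+// custom loader set via SetStoreLoader could, for instance, run a
+// migration step first (hypothetical sketch):
+//
+//	app.SetStoreLoader(func(ms sdk.CommitMultiStore) error {
+//		// hypothetical pre-load hook would go here
+//		return ms.LoadLatestVersion()
+//	})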
+func TestSetLoader(t *testing.T) { + cases := map[string]struct { + setLoader func(*BaseApp) + origStoreKey string + loadStoreKey string + }{ + "don't set loader": { + origStoreKey: "foo", + loadStoreKey: "foo", + }, + "default loader": { + setLoader: useDefaultLoader, + origStoreKey: "foo", + loadStoreKey: "foo", + }, + } + + k := []byte("key") + v := []byte("value") + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + // prepare a db with some data + db := dbm.NewMemDB() + initStore(t, db, tc.origStoreKey, k, v) + + // load the app with the existing db + opts := []func(*BaseApp){SetPruning(store.PruneNothing)} + if tc.setLoader != nil { + opts = append(opts, tc.setLoader) + } + app := NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...) + app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey)) + err := app.LoadLatestVersion() + require.Nil(t, err) + + // "execute" one block + app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: 2}}) + res := app.Commit() + require.NotNil(t, res.Data) + + // check db is properly updated + checkStore(t, db, 2, tc.loadStoreKey, k, v) + checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) + }) + } +} diff --git a/baseapp/msg_service_router.go b/baseapp/msg_service_router.go index fc4c1339059..a8c997cf683 100644 --- a/baseapp/msg_service_router.go +++ b/baseapp/msg_service_router.go @@ -112,6 +112,15 @@ func (msr *MsgServiceRouter) RegisterService(sd *grpc.ServiceDesc, handler inter goCtx = context.WithValue(goCtx, sdk.SdkContextKey, ctx) return handler(goCtx, req) } + if err := req.ValidateBasic(); err != nil { + if mm, ok := req.(getter1); ok { + if !mm.GetAmount().Amount.IsZero() { + return nil, err + } + } else { + return nil, err + } + } // Call the method handler from the service description with the handler object. // We don't do any decoding here because the decoding was already done. res, err := methodHandler(handler, sdk.WrapSDKContext(ctx), noopDecoder, interceptor) @@ -138,3 +147,7 @@ func noopDecoder(_ interface{}) error { return nil } func noopInterceptor(_ context.Context, _ interface{}, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (interface{}, error) { return nil, nil } + +type getter1 interface { + GetAmount() sdk.Coin +} diff --git a/baseapp/p2p.go b/baseapp/p2p.go new file mode 100644 index 00000000000..4c339dfad0d --- /dev/null +++ b/baseapp/p2p.go @@ -0,0 +1,66 @@ +package baseapp + +// This file exists because Tendermint allows the application to control which peers it connects to. +// This is for an interesting idea -- allow the application to control the peer layer/ topology! +// It would be really exciting to mix web of trust and expander-graph style primitives +// for how information gets disseminated. +// However the API surface for this to make sense isn't really well exposed / thought through, +// so this file mostly acts as confusing boilerplate. + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + abci "github.com/tendermint/tendermint/abci/types" +) + +type peerFilters struct { + addrPeerFilter sdk.PeerFilter // filter peers by address and port + idPeerFilter sdk.PeerFilter // filter peers by node ID +} + +// FilterPeerByAddrPort filters peers by address/port. +func (app *BaseApp) FilterPeerByAddrPort(info string) abci.ResponseQuery { + if app.addrPeerFilter != nil { + return app.addrPeerFilter(info) + } + + return abci.ResponseQuery{} +} + +// FilterPeerByID filters peers by node ID. 
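+// Tendermint invokes these filters through ABCI queries of the form
+// "/p2p/filter/addr/<ip:port>" and "/p2p/filter/id/<nodeID>" (see
+// handleQueryP2P below); a response with a non-zero Code rejects the peer.
+// An ID filter wired in via app.SetIDPeerFilter might look like this
+// (illustrative sketch; banned is a hypothetical deny set):
+//
+//	func(id string) abci.ResponseQuery {
+//		if banned[id] {
+//			return abci.ResponseQuery{Code: 1, Log: "peer is banned"}
+//		}
+//		return abci.ResponseQuery{}
+//	}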
+func (app *BaseApp) FilterPeerByID(info string) abci.ResponseQuery {
+	if app.idPeerFilter != nil {
+		return app.idPeerFilter(info)
+	}
+
+	return abci.ResponseQuery{}
+}
+
+func handleQueryP2P(app *BaseApp, path []string) abci.ResponseQuery {
+	// "/p2p" prefix for p2p queries
+	if len(path) < 4 {
+		return sdkerrors.QueryResultWithDebug(
+			sdkerrors.Wrap(
+				sdkerrors.ErrUnknownRequest, "path should be p2p filter <addr|id> <parameter>",
+			), app.trace)
+	}
+
+	var resp abci.ResponseQuery
+
+	cmd, typ, arg := path[1], path[2], path[3]
+	switch cmd {
+	case "filter":
+		switch typ {
+		case "addr":
+			resp = app.FilterPeerByAddrPort(arg)
+
+		case "id":
+			resp = app.FilterPeerByID(arg)
+		}
+
+	default:
+		resp = sdkerrors.QueryResultWithDebug(sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, "expected second parameter to be 'filter'"), app.trace)
+	}
+
+	return resp
+}
diff --git a/baseapp/queryrouter.go b/baseapp/queryrouter.go
index 1727b2ab2df..13bef6ad08f 100644
--- a/baseapp/queryrouter.go
+++ b/baseapp/queryrouter.go
@@ -2,6 +2,7 @@ package baseapp
 
 import (
 	"fmt"
+	"strings"
 
 	sdk "github.com/cosmos/cosmos-sdk/types"
 )
@@ -21,16 +22,22 @@ func NewQueryRouter() *QueryRouter {
 
 // AddRoute adds a query path to the router with a given Querier. It will panic
 // if a duplicate route is given. The route must be alphanumeric.
-func (qrt *QueryRouter) AddRoute(path string, q sdk.Querier) sdk.QueryRouter {
-	if !sdk.IsAlphaNumeric(path) {
+func (qrt *QueryRouter) AddRoute(route string, q sdk.Querier) sdk.QueryRouter {
+	if !sdk.IsAlphaNumeric(route) {
 		panic("route expressions can only contain alphanumeric characters")
 	}
-	if qrt.routes[path] != nil {
-		panic(fmt.Sprintf("route %s has already been initialized", path))
+	// Routes must be a single, final path component.
+	// Needed to ensure erroneous queries don't get into the state machine.
+	if strings.Contains(route, "/") {
+		panic("routes cannot contain '/'")
 	}
 
-	qrt.routes[path] = q
+	if qrt.routes[route] != nil {
+		panic(fmt.Sprintf("route %s has already been initialized", route))
+	}
+
+	qrt.routes[route] = q
 	return qrt
 }
diff --git a/baseapp/testutil/buf.gen.yaml b/baseapp/testutil/buf.gen.yaml
new file mode 100644
index 00000000000..d7d17bbb26f
--- /dev/null
+++ b/baseapp/testutil/buf.gen.yaml
@@ -0,0 +1,5 @@
+version: v1
+plugins:
+  - name: gocosmos
+    out: ../..
+    opt: plugins=grpc,Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types
diff --git a/baseapp/testutil/buf.lock b/baseapp/testutil/buf.lock
new file mode 100644
index 00000000000..c6f890bd4b3
--- /dev/null
+++ b/baseapp/testutil/buf.lock
@@ -0,0 +1,17 @@
+# Generated by buf. DO NOT EDIT.
+version: v1 +deps: + - remote: buf.build + owner: cosmos + repository: cosmos-proto + branch: main + commit: 9e9a53f8db0d493f8b8c66d458c767c1 + digest: b1-6w7Hozd_Oo_yZ1Sku8Nhz9qou-4licLr6VmEyeI9jO4= + create_time: 2021-12-02T20:41:47.795828Z + - remote: buf.build + owner: cosmos + repository: gogo-proto + branch: main + commit: bee5511075b7499da6178d9e4aaa628b + digest: b1-rrBIustouD-S80cVoZ_rM0qJsmei9AgbXy9GPQu6vxg= + create_time: 2021-12-02T20:01:17.069307Z diff --git a/baseapp/testutil/buf.yaml b/baseapp/testutil/buf.yaml new file mode 100644 index 00000000000..e6f82c0cdcd --- /dev/null +++ b/baseapp/testutil/buf.yaml @@ -0,0 +1,4 @@ +version: v1 +deps: + - buf.build/cosmos/gogo-proto + - buf.build/cosmos/cosmos-proto diff --git a/baseapp/testutil/messages.go b/baseapp/testutil/messages.go new file mode 100644 index 00000000000..f0950eedc6b --- /dev/null +++ b/baseapp/testutil/messages.go @@ -0,0 +1,60 @@ +package testutil + +import ( + "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +func RegisterInterfaces(registry types.InterfaceRegistry) { + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgCounter{}, + &MsgCounter2{}, + &MsgKeyValue{}, + ) + msgservice.RegisterMsgServiceDesc(registry, &_Counter_serviceDesc) + msgservice.RegisterMsgServiceDesc(registry, &_Counter2_serviceDesc) + msgservice.RegisterMsgServiceDesc(registry, &_KeyValue_serviceDesc) +} + +var _ sdk.Msg = &MsgCounter{} + +func (msg *MsgCounter) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } +func (msg *MsgCounter) ValidateBasic() error { + if msg.Counter >= 0 { + return nil + } + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +} + +var _ sdk.Msg = &MsgCounter2{} + +func (msg *MsgCounter2) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } +func (msg *MsgCounter2) ValidateBasic() error { + if msg.Counter >= 0 { + return nil + } + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +} + +var _ sdk.Msg = &MsgKeyValue{} + +func (msg *MsgKeyValue) GetSigners() []sdk.AccAddress { + if msg.Signer == "" { + return []sdk.AccAddress{} + } + + return []sdk.AccAddress{sdk.MustAccAddressFromBech32(msg.Signer)} +} + +func (msg *MsgKeyValue) ValidateBasic() error { + if msg.Key == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") + } + if msg.Value == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") + } + return nil +} diff --git a/baseapp/testutil/messages.pb.go b/baseapp/testutil/messages.pb.go new file mode 100644 index 00000000000..9a19f22013a --- /dev/null +++ b/baseapp/testutil/messages.pb.go @@ -0,0 +1,1293 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: messages.proto + +package testutil + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/codec/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MsgCounter struct { + Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` + FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` +} + +func (m *MsgCounter) Reset() { *m = MsgCounter{} } +func (m *MsgCounter) String() string { return proto.CompactTextString(m) } +func (*MsgCounter) ProtoMessage() {} +func (*MsgCounter) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{0} +} +func (m *MsgCounter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCounter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCounter) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCounter.Merge(m, src) +} +func (m *MsgCounter) XXX_Size() int { + return m.Size() +} +func (m *MsgCounter) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCounter.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCounter proto.InternalMessageInfo + +func (m *MsgCounter) GetCounter() int64 { + if m != nil { + return m.Counter + } + return 0 +} + +func (m *MsgCounter) GetFailOnHandler() bool { + if m != nil { + return m.FailOnHandler + } + return false +} + +type MsgCounter2 struct { + Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` + FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` +} + +func (m *MsgCounter2) Reset() { *m = MsgCounter2{} } +func (m *MsgCounter2) String() string { return proto.CompactTextString(m) } +func (*MsgCounter2) ProtoMessage() {} +func (*MsgCounter2) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{1} +} +func (m *MsgCounter2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCounter2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCounter2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCounter2) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCounter2.Merge(m, src) +} +func (m *MsgCounter2) XXX_Size() int { + return m.Size() +} +func (m *MsgCounter2) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCounter2.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCounter2 proto.InternalMessageInfo + +func (m *MsgCounter2) GetCounter() int64 { + if m != nil { + return m.Counter + } + return 0 +} + +func (m *MsgCounter2) GetFailOnHandler() bool { + if m != nil { + return m.FailOnHandler + } + return false +} + +type MsgCreateCounterResponse struct { +} + +func (m *MsgCreateCounterResponse) Reset() { *m = MsgCreateCounterResponse{} } +func (m *MsgCreateCounterResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateCounterResponse) ProtoMessage() {} +func 
(*MsgCreateCounterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{2} +} +func (m *MsgCreateCounterResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateCounterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateCounterResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateCounterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateCounterResponse.Merge(m, src) +} +func (m *MsgCreateCounterResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateCounterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateCounterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateCounterResponse proto.InternalMessageInfo + +type MsgKeyValue struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgKeyValue) Reset() { *m = MsgKeyValue{} } +func (m *MsgKeyValue) String() string { return proto.CompactTextString(m) } +func (*MsgKeyValue) ProtoMessage() {} +func (*MsgKeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{3} +} +func (m *MsgKeyValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgKeyValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgKeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgKeyValue.Merge(m, src) +} +func (m *MsgKeyValue) XXX_Size() int { + return m.Size() +} +func (m *MsgKeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_MsgKeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgKeyValue proto.InternalMessageInfo + +func (m *MsgKeyValue) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MsgKeyValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MsgKeyValue) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +type MsgCreateKeyValueResponse struct { +} + +func (m *MsgCreateKeyValueResponse) Reset() { *m = MsgCreateKeyValueResponse{} } +func (m *MsgCreateKeyValueResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateKeyValueResponse) ProtoMessage() {} +func (*MsgCreateKeyValueResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{4} +} +func (m *MsgCreateKeyValueResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateKeyValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateKeyValueResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateKeyValueResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateKeyValueResponse.Merge(m, src) +} +func (m *MsgCreateKeyValueResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateKeyValueResponse) 
XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateKeyValueResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateKeyValueResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCounter)(nil), "testdata.MsgCounter") + proto.RegisterType((*MsgCounter2)(nil), "testdata.MsgCounter2") + proto.RegisterType((*MsgCreateCounterResponse)(nil), "testdata.MsgCreateCounterResponse") + proto.RegisterType((*MsgKeyValue)(nil), "testdata.MsgKeyValue") + proto.RegisterType((*MsgCreateKeyValueResponse)(nil), "testdata.MsgCreateKeyValueResponse") +} + +func init() { proto.RegisterFile("messages.proto", fileDescriptor_4dc296cbfe5ffcd5) } + +var fileDescriptor_4dc296cbfe5ffcd5 = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0xc1, 0x8a, 0x9b, 0x50, + 0x14, 0x86, 0x63, 0xa5, 0x89, 0x3d, 0x4d, 0xdb, 0x20, 0x69, 0x31, 0x16, 0x24, 0x58, 0x28, 0xd9, + 0x44, 0xc1, 0x3e, 0x41, 0xdb, 0x45, 0x5b, 0x5a, 0x1b, 0xb0, 0xd0, 0x61, 0x66, 0x13, 0xae, 0xe6, + 0xe4, 0x46, 0xa2, 0xf7, 0x8a, 0xf7, 0x3a, 0x90, 0xb7, 0x98, 0xc7, 0x9a, 0x65, 0x96, 0xb3, 0x1c, + 0x92, 0x17, 0x19, 0xd4, 0x98, 0x30, 0xc1, 0xc5, 0x2c, 0x66, 0xe5, 0x39, 0xff, 0x0f, 0xdf, 0xcf, + 0xf9, 0xbd, 0xf0, 0x36, 0x45, 0x21, 0x08, 0x45, 0xe1, 0x64, 0x39, 0x97, 0x5c, 0xd7, 0x24, 0x0a, + 0xb9, 0x20, 0x92, 0x98, 0x43, 0xca, 0x29, 0xaf, 0x44, 0xb7, 0x9c, 0x6a, 0xdf, 0x1c, 0x51, 0xce, + 0x69, 0x82, 0x6e, 0xb5, 0x85, 0xc5, 0xd2, 0x25, 0x6c, 0x53, 0x5b, 0xf6, 0x5f, 0x00, 0x5f, 0xd0, + 0xef, 0xbc, 0x60, 0x12, 0x73, 0xdd, 0x80, 0x5e, 0x54, 0x8f, 0x86, 0x32, 0x56, 0x26, 0x6a, 0xd0, + 0xac, 0xfa, 0x67, 0x78, 0xb7, 0x24, 0x71, 0x32, 0xe7, 0x6c, 0xbe, 0x22, 0x6c, 0x91, 0x60, 0x6e, + 0xbc, 0x18, 0x2b, 0x13, 0x2d, 0x78, 0x53, 0xca, 0x33, 0xf6, 0xb3, 0x16, 0xed, 0x19, 0xbc, 0x3e, + 0xf1, 0xbc, 0x67, 0x00, 0x9a, 0x60, 0x94, 0xc0, 0x1c, 0x89, 0xc4, 0x03, 0x36, 0x40, 0x91, 0x71, + 0x26, 0xd0, 0xf6, 0xab, 0xb0, 0xdf, 0xb8, 0xf9, 0x4f, 0x92, 0x02, 0xf5, 0x01, 0xa8, 0x6b, 0xdc, + 0x54, 0x41, 0xfd, 0xa0, 0x1c, 0xf5, 0x21, 0xbc, 0xbc, 0x2e, 0xad, 0x0a, 0xdd, 0x0f, 0xea, 0x45, + 0xff, 0x00, 0x5d, 0x11, 0x53, 0x86, 0xb9, 0xa1, 0x8e, 0x95, 0xc9, 0xab, 0xe0, 0xb0, 0xd9, 0x1f, + 0x61, 0x74, 0x8c, 0x6a, 0xa0, 0x4d, 0x96, 0x77, 0x01, 0xbd, 0xa6, 0xa5, 0x3f, 0x30, 0xf8, 0xc5, + 0xa2, 0x1c, 0x53, 0x64, 0xb2, 0xd1, 0x86, 0x4e, 0xf3, 0x0f, 0x9c, 0xd3, 0xfd, 0xa6, 0xfd, 0x58, + 0x6d, 0x3b, 0xc2, 0xbb, 0x04, 0xed, 0x58, 0x97, 0xdf, 0x42, 0x7e, 0xdf, 0x46, 0xf6, 0x9e, 0x84, + 0xf6, 0x41, 0x3b, 0x96, 0xf3, 0x15, 0xd4, 0x7f, 0x28, 0xcf, 0x68, 0x8d, 0x6b, 0x7e, 0x6a, 0xa1, + 0x9d, 0x57, 0xf0, 0xed, 0xc7, 0xed, 0xce, 0x52, 0xb6, 0x3b, 0x4b, 0xb9, 0xdf, 0x59, 0xca, 0xcd, + 0xde, 0xea, 0x6c, 0xf7, 0x56, 0xe7, 0x6e, 0x6f, 0x75, 0xae, 0xa6, 0x34, 0x96, 0xab, 0x22, 0x74, + 0x22, 0x9e, 0xba, 0x11, 0x17, 0x29, 0x17, 0x87, 0xcf, 0x54, 0x2c, 0xd6, 0x6e, 0x48, 0x04, 0x92, + 0x2c, 0x73, 0xcb, 0x88, 0x42, 0xc6, 0x49, 0xd8, 0xad, 0xde, 0xde, 0x97, 0x87, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x63, 0x31, 0xab, 0xcc, 0xc8, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// CounterClient is the client API for Counter service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type CounterClient interface { + IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) +} + +type counterClient struct { + cc grpc1.ClientConn +} + +func NewCounterClient(cc grpc1.ClientConn) CounterClient { + return &counterClient{cc} +} + +func (c *counterClient) IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { + out := new(MsgCreateCounterResponse) + err := c.cc.Invoke(ctx, "/testdata.Counter/IncrementCounter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CounterServer is the server API for Counter service. +type CounterServer interface { + IncrementCounter(context.Context, *MsgCounter) (*MsgCreateCounterResponse, error) +} + +// UnimplementedCounterServer can be embedded to have forward compatible implementations. +type UnimplementedCounterServer struct { +} + +func (*UnimplementedCounterServer) IncrementCounter(ctx context.Context, req *MsgCounter) (*MsgCreateCounterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") +} + +func RegisterCounterServer(s grpc1.Server, srv CounterServer) { + s.RegisterService(&_Counter_serviceDesc, srv) +} + +func _Counter_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCounter) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CounterServer).IncrementCounter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testdata.Counter/IncrementCounter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CounterServer).IncrementCounter(ctx, req.(*MsgCounter)) + } + return interceptor(ctx, in, info, handler) +} + +var _Counter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testdata.Counter", + HandlerType: (*CounterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IncrementCounter", + Handler: _Counter_IncrementCounter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +// Counter2Client is the client API for Counter2 service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type Counter2Client interface { + IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) +} + +type counter2Client struct { + cc grpc1.ClientConn +} + +func NewCounter2Client(cc grpc1.ClientConn) Counter2Client { + return &counter2Client{cc} +} + +func (c *counter2Client) IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { + out := new(MsgCreateCounterResponse) + err := c.cc.Invoke(ctx, "/testdata.Counter2/IncrementCounter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Counter2Server is the server API for Counter2 service. +type Counter2Server interface { + IncrementCounter(context.Context, *MsgCounter2) (*MsgCreateCounterResponse, error) +} + +// UnimplementedCounter2Server can be embedded to have forward compatible implementations. 
+type UnimplementedCounter2Server struct { +} + +func (*UnimplementedCounter2Server) IncrementCounter(ctx context.Context, req *MsgCounter2) (*MsgCreateCounterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") +} + +func RegisterCounter2Server(s grpc1.Server, srv Counter2Server) { + s.RegisterService(&_Counter2_serviceDesc, srv) +} + +func _Counter2_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCounter2) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Counter2Server).IncrementCounter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testdata.Counter2/IncrementCounter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Counter2Server).IncrementCounter(ctx, req.(*MsgCounter2)) + } + return interceptor(ctx, in, info, handler) +} + +var _Counter2_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testdata.Counter2", + HandlerType: (*Counter2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IncrementCounter", + Handler: _Counter2_IncrementCounter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +// KeyValueClient is the client API for KeyValue service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyValueClient interface { + Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) +} + +type keyValueClient struct { + cc grpc1.ClientConn +} + +func NewKeyValueClient(cc grpc1.ClientConn) KeyValueClient { + return &keyValueClient{cc} +} + +func (c *keyValueClient) Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) { + out := new(MsgCreateKeyValueResponse) + err := c.cc.Invoke(ctx, "/testdata.KeyValue/Set", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyValueServer is the server API for KeyValue service. +type KeyValueServer interface { + Set(context.Context, *MsgKeyValue) (*MsgCreateKeyValueResponse, error) +} + +// UnimplementedKeyValueServer can be embedded to have forward compatible implementations. 
+type UnimplementedKeyValueServer struct { +} + +func (*UnimplementedKeyValueServer) Set(ctx context.Context, req *MsgKeyValue) (*MsgCreateKeyValueResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} + +func RegisterKeyValueServer(s grpc1.Server, srv KeyValueServer) { + s.RegisterService(&_KeyValue_serviceDesc, srv) +} + +func _KeyValue_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgKeyValue) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyValueServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testdata.KeyValue/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyValueServer).Set(ctx, req.(*MsgKeyValue)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyValue_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testdata.KeyValue", + HandlerType: (*KeyValueServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Set", + Handler: _KeyValue_Set_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +func (m *MsgCounter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCounter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCounter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailOnHandler { + i-- + if m.FailOnHandler { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Counter != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgCounter2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCounter2) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCounter2) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailOnHandler { + i-- + if m.FailOnHandler { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Counter != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateCounterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateCounterResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateCounterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgKeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgKeyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgKeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateKeyValueResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateKeyValueResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateKeyValueResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintMessages(dAtA []byte, offset int, v uint64) int { + offset -= sovMessages(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCounter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Counter != 0 { + n += 1 + sovMessages(uint64(m.Counter)) + } + if m.FailOnHandler { + n += 2 + } + return n +} + +func (m *MsgCounter2) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Counter != 0 { + n += 1 + sovMessages(uint64(m.Counter)) + } + if m.FailOnHandler { + n += 2 + } + return n +} + +func (m *MsgCreateCounterResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgKeyValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + return n +} + +func (m *MsgCreateKeyValueResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovMessages(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMessages(x uint64) (n int) { + return sovMessages(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCounter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCounter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCounter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + m.Counter = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Counter |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailOnHandler = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCounter2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCounter2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCounter2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + m.Counter = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Counter |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailOnHandler = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateCounterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateCounterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateCounterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgKeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgKeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgKeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateKeyValueResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateKeyValueResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateKeyValueResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMessages(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMessages + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMessages + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMessages + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + 
ErrInvalidLengthMessages = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessages = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMessages = fmt.Errorf("proto: unexpected end of group") +) diff --git a/baseapp/testutil/messages.proto b/baseapp/testutil/messages.proto new file mode 100644 index 00000000000..866e3366698 --- /dev/null +++ b/baseapp/testutil/messages.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; +package testdata; + +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; + +option go_package = "github.com/cosmos/cosmos-sdk/baseapp/testutil"; + +message MsgCounter { + int64 counter = 1; + bool fail_on_handler = 2; +} + +message MsgCounter2 { + int64 counter = 1; + bool fail_on_handler = 2; +} + +message MsgCreateCounterResponse {} + +message MsgKeyValue { + bytes key = 1; + bytes value = 2; + string signer = 3; +} + +message MsgCreateKeyValueResponse {} + +service Counter { + rpc IncrementCounter(MsgCounter) returns (MsgCreateCounterResponse); +} + +service Counter2 { + rpc IncrementCounter(MsgCounter2) returns (MsgCreateCounterResponse); +} + +service KeyValue { + rpc Set(MsgKeyValue) returns (MsgCreateKeyValueResponse); +} \ No newline at end of file diff --git a/contrib/images/simd-env/Dockerfile b/contrib/images/simd-env/Dockerfile index b83c3b831ba..8adbc8dd8ef 100644 --- a/contrib/images/simd-env/Dockerfile +++ b/contrib/images/simd-env/Dockerfile @@ -2,6 +2,7 @@ FROM golang:1.18-alpine AS build RUN apk add build-base git linux-headers WORKDIR /work COPY go.mod go.sum /work/ +COPY ./ics23/go/go.mod /work/ics23/go/go.mod RUN go mod download COPY ./ /work diff --git a/go.mod b/go.mod index 5430426d22e..58e6f879acf 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/confio/ics23/go v0.7.0 github.com/cosmos/btcutil v1.0.4 github.com/cosmos/go-bip39 v1.0.0 - github.com/cosmos/iavl v0.19.2-0.20220916140702-9b6be3095313 + github.com/cosmos/iavl v0.19.3 github.com/cosmos/ledger-cosmos-go v0.11.1 github.com/gogo/gateway v1.1.0 github.com/gogo/protobuf v1.3.3 @@ -125,6 +125,9 @@ require ( replace ( github.com/99designs/keyring => github.com/cosmos/keyring v1.1.7-0.20210622111912-ef00f8ac3d76 + // vendor ics23 + github.com/confio/ics23/go => ./ics23/go + // Fix upstream GHSA-h395-qcrw-5vmq vulnerability. 
// TODO Remove it: https://github.com/cosmos/cosmos-sdk/issues/10409 github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.7.0 diff --git a/go.sum b/go.sum index 27715505c4d..35ec958f0e3 100644 --- a/go.sum +++ b/go.sum @@ -146,8 +146,6 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coinbase/rosetta-sdk-go v0.7.0 h1:lmTO/JEpCvZgpbkOITL95rA80CPKb5CtMzLaqF2mCNg= github.com/coinbase/rosetta-sdk-go v0.7.0/go.mod h1:7nD3oBPIiHqhRprqvMgPoGxe/nyq3yftRmpsy29coWE= -github.com/confio/ics23/go v0.7.0 h1:00d2kukk7sPoHWL4zZBZwzxnpA2pec1NPdwbSokJ5w8= -github.com/confio/ics23/go v0.7.0/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -160,8 +158,8 @@ github.com/cosmos/btcutil v1.0.4/go.mod h1:Ffqc8Hn6TJUdDgHBwIZLtrLQC1KdJ9jGJl/Tv github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= -github.com/cosmos/iavl v0.19.2-0.20220916140702-9b6be3095313 h1:R7CnaI/0OLwOusy7n9750n8fqQ3yCQ8OJQI2L3ws9RA= -github.com/cosmos/iavl v0.19.2-0.20220916140702-9b6be3095313/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= +github.com/cosmos/iavl v0.19.3 h1:cESO0OwTTxQm5rmyESKW+zESheDUYI7CcZDWWDwnuxg= +github.com/cosmos/iavl v0.19.3/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= github.com/cosmos/keyring v1.1.7-0.20210622111912-ef00f8ac3d76 h1:DdzS1m6o/pCqeZ8VOAit/gyATedRgjvkVI+UCrLpyuU= github.com/cosmos/keyring v1.1.7-0.20210622111912-ef00f8ac3d76/go.mod h1:0mkLWIoZuQ7uBoospo5Q9zIpqq6rYCPJDSUdeCJvPM8= github.com/cosmos/ledger-cosmos-go v0.11.1 h1:9JIYsGnXP613pb2vPjFeMMjBI5lEDsEaF6oYorTy6J4= diff --git a/ics23/.gitignore b/ics23/.gitignore new file mode 100644 index 00000000000..22d0d82f809 --- /dev/null +++ b/ics23/.gitignore @@ -0,0 +1 @@ +vendor diff --git a/ics23/go/Makefile b/ics23/go/Makefile new file mode 100644 index 00000000000..aebfcb20d75 --- /dev/null +++ b/ics23/go/Makefile @@ -0,0 +1,20 @@ +.PHONY: protoc test + +# make sure we turn on go modules +export GO111MODULE := on + +# PROTOC_FLAGS := -I=.. -I=./vendor -I=$(GOPATH)/src +PROTOC_FLAGS := -I=.. -I=$(GOPATH)/src + +test: + go test . + +protoc: +# @go mod vendor + protoc --gocosmos_out=plugins=interfacetype+grpc,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. $(PROTOC_FLAGS) ../proofs.proto + +install-proto-dep: + @echo "Installing protoc-gen-gocosmos..." + @go install github.com/regen-network/cosmos-proto/protoc-gen-gocosmos + + diff --git a/ics23/go/compress.go b/ics23/go/compress.go new file mode 100644 index 00000000000..ebe3275e389 --- /dev/null +++ b/ics23/go/compress.go @@ -0,0 +1,157 @@ +package ics23 + +// IsCompressed returns true if the proof was compressed +func IsCompressed(proof *CommitmentProof) bool { + return proof.GetCompressed() != nil +} + +// Compress will return a CompressedBatchProof if the input is BatchProof +// Otherwise it will return the input. 
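+// For example (illustrative sketch):
+//
+//	small := Compress(batchProof) // BatchProof -> CompressedBatchProof
+//	same := Compress(small)       // already compressed: returned as-is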
+// This is safe to call multiple times (idempotent) +func Compress(proof *CommitmentProof) *CommitmentProof { + batch := proof.GetBatch() + if batch == nil { + return proof + } + return &CommitmentProof{ + Proof: &CommitmentProof_Compressed{ + Compressed: compress(batch), + }, + } +} + +// Decompress will return a BatchProof if the input is CompressedBatchProof +// Otherwise it will return the input. +// This is safe to call multiple times (idempotent) +func Decompress(proof *CommitmentProof) *CommitmentProof { + comp := proof.GetCompressed() + if comp != nil { + return &CommitmentProof{ + Proof: &CommitmentProof_Batch{ + Batch: decompress(comp), + }, + } + } + return proof +} + +func compress(batch *BatchProof) *CompressedBatchProof { + var centries []*CompressedBatchEntry + var lookup []*InnerOp + registry := make(map[string]int32) + + for _, entry := range batch.Entries { + centry := compressEntry(entry, &lookup, registry) + centries = append(centries, centry) + } + + return &CompressedBatchProof{ + Entries: centries, + LookupInners: lookup, + } +} + +func compressEntry(entry *BatchEntry, lookup *[]*InnerOp, registry map[string]int32) *CompressedBatchEntry { + if exist := entry.GetExist(); exist != nil { + return &CompressedBatchEntry{ + Proof: &CompressedBatchEntry_Exist{ + Exist: compressExist(exist, lookup, registry), + }, + } + } + + non := entry.GetNonexist() + return &CompressedBatchEntry{ + Proof: &CompressedBatchEntry_Nonexist{ + Nonexist: &CompressedNonExistenceProof{ + Key: non.Key, + Left: compressExist(non.Left, lookup, registry), + Right: compressExist(non.Right, lookup, registry), + }, + }, + } +} + +func compressExist(exist *ExistenceProof, lookup *[]*InnerOp, registry map[string]int32) *CompressedExistenceProof { + if exist == nil { + return nil + } + res := &CompressedExistenceProof{ + Key: exist.Key, + Value: exist.Value, + Leaf: exist.Leaf, + Path: make([]int32, len(exist.Path)), + } + for i, step := range exist.Path { + res.Path[i] = compressStep(step, lookup, registry) + } + return res +} + +func compressStep(step *InnerOp, lookup *[]*InnerOp, registry map[string]int32) int32 { + bz, err := step.Marshal() + if err != nil { + panic(err) + } + sig := string(bz) + + // load from cache if there + if num, ok := registry[sig]; ok { + return num + } + + // create new step if not there + num := int32(len(*lookup)) + *lookup = append(*lookup, step) + registry[sig] = num + return num +} + +func decompress(comp *CompressedBatchProof) *BatchProof { + lookup := comp.LookupInners + + var entries []*BatchEntry + + for _, centry := range comp.Entries { + entry := decompressEntry(centry, lookup) + entries = append(entries, entry) + } + + return &BatchProof{ + Entries: entries, + } +} + +// TendermintSpec constrains the format from proofs-tendermint (crypto/merkle SimpleProof) +var TendermintSpec = &ProofSpec{ + LeafSpec: &LeafOp{ + Prefix: []byte{0}, + PrehashKey: HashOp_NO_HASH, + Hash: HashOp_SHA256, + PrehashValue: HashOp_SHA256, + Length: LengthOp_VAR_PROTO, + }, + InnerSpec: &InnerSpec{ + ChildOrder: []int32{0, 1}, + MinPrefixLength: 1, + MaxPrefixLength: 1, + ChildSize: 32, // (no length byte) + Hash: HashOp_SHA256, + }, +} + +func decompressExist(exist *CompressedExistenceProof, lookup []*InnerOp) *ExistenceProof { + if exist == nil { + return nil + } + res := &ExistenceProof{ + Key: exist.Key, + Value: exist.Value, + Leaf: exist.Leaf, + Path: make([]*InnerOp, len(exist.Path)), + } + for i, step := range exist.Path { + res.Path[i] = lookup[step] + } + return res +} diff 
--git a/ics23/go/go.mod b/ics23/go/go.mod new file mode 100644 index 00000000000..4588c6961d2 --- /dev/null +++ b/ics23/go/go.mod @@ -0,0 +1,10 @@ +module github.com/confio/ics23/go + +go 1.14 + +require ( + github.com/gogo/protobuf v1.3.1 + github.com/pkg/errors v0.8.1 + github.com/stretchr/testify v1.8.0 // indirect + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 +) diff --git a/ics23/go/go.sum b/ics23/go/go.sum new file mode 100644 index 00000000000..9f866e054cf --- /dev/null +++ b/ics23/go/go.sum @@ -0,0 +1,28 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/ics23/go/ics23.go b/ics23/go/ics23.go new file mode 100644 index 00000000000..9a79dc3fc30 --- /dev/null +++ b/ics23/go/ics23.go @@ -0,0 +1,175 @@ +/* +* +This implements the client side functions as specified in +https://github.com/cosmos/ics/tree/master/spec/ics-023-vector-commitments + +In particular: + + // Assumes ExistenceProof + type verifyMembership = (root: CommitmentRoot, proof: 
CommitmentProof, key: Key, value: Value) => boolean + + // Assumes NonExistenceProof + type verifyNonMembership = (root: CommitmentRoot, proof: CommitmentProof, key: Key) => boolean + + // Assumes BatchProof - required ExistenceProofs may be a subset of all items proven + type batchVerifyMembership = (root: CommitmentRoot, proof: CommitmentProof, items: Map) => boolean + + // Assumes BatchProof - required NonExistenceProofs may be a subset of all items proven + type batchVerifyNonMembership = (root: CommitmentRoot, proof: CommitmentProof, keys: Set) => boolean + +We make an adjustment to accept a Spec to ensure the provided proof is in the format of the expected merkle store. +This can avoid a range of attacks on fake preimages, as we need to be careful about how we map key, value -> leaf +and determine neighbors +*/ +package ics23 + +import ( + "bytes" + "fmt" +) + +// CommitmentRoot is a byte slice that represents the merkle root of a tree that can be used to validate proofs +type CommitmentRoot []byte + +// VerifyMembership returns true iff +// proof is (contains) an ExistenceProof for the given key and value AND +// calculating the root for the ExistenceProof matches the provided CommitmentRoot +func VerifyMembership(spec *ProofSpec, root CommitmentRoot, proof *CommitmentProof, key []byte, value []byte) bool { + // decompress it before running code (no-op if not compressed) + proof = Decompress(proof) + ep := getExistProofForKey(proof, key) + if ep == nil { + return false + } + err := ep.Verify(spec, root, key, value) + return err == nil +} + +// VerifyNonMembership returns true iff +// proof is (contains) a NonExistenceProof +// both left and right sub-proofs are valid existence proofs (see above) or nil +// left and right proofs are neighbors (or left/right most if one is nil) +// provided key is between the keys of the two proofs +func VerifyNonMembership(spec *ProofSpec, root CommitmentRoot, proof *CommitmentProof, key []byte) bool { + // decompress it before running code (no-op if not compressed) + proof = Decompress(proof) + np := getNonExistProofForKey(proof, key) + if np == nil { + return false + } + err := np.Verify(spec, root, key) + return err == nil +} + +// BatchVerifyMembership will ensure all items are also proven by the CommitmentProof (which should be a BatchProof, +// unless there is one item, when an ExistenceProof may work) +func BatchVerifyMembership(spec *ProofSpec, root CommitmentRoot, proof *CommitmentProof, items map[string][]byte) bool { + // decompress it before running code (no-op if not compressed) - once for batch + proof = Decompress(proof) + for k, v := range items { + valid := VerifyMembership(spec, root, proof, []byte(k), v) + if !valid { + return false + } + } + return true +} + +// BatchVerifyNonMembership will ensure all items are also proven to not be in the Commitment by the CommitmentProof +// (which should be a BatchProof, unless there is one item, when a NonExistenceProof may work) +func BatchVerifyNonMembership(spec *ProofSpec, root CommitmentRoot, proof *CommitmentProof, keys [][]byte) bool { + // decompress it before running code (no-op if not compressed) - once for batch + proof = Decompress(proof) + for _, k := range keys { + valid := VerifyNonMembership(spec, root, proof, k) + if !valid { + return false + } + } + return true +} + +// CombineProofs takes a number of commitment proofs (simple or batch) and +// converts them into a batch and compresses them.
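+// A hedged usage sketch (editor's note, not part of the vendored package;
+// proofA and proofB are hypothetical *CommitmentProof values obtained from a
+// proof-generating store):
+//
+//	combined, err := CombineProofs([]*CommitmentProof{proofA, proofB})
+//	// combined now carries a compressed batch proving both items at once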
+// +// This is designed for proof generation libraries to create efficient batches +func CombineProofs(proofs []*CommitmentProof) (*CommitmentProof, error) { + var entries []*BatchEntry + + for _, proof := range proofs { + if ex := proof.GetExist(); ex != nil { + entry := &BatchEntry{ + Proof: &BatchEntry_Exist{ + Exist: ex, + }, + } + entries = append(entries, entry) + } else if non := proof.GetNonexist(); non != nil { + entry := &BatchEntry{ + Proof: &BatchEntry_Nonexist{ + Nonexist: non, + }, + } + entries = append(entries, entry) + } else if batch := proof.GetBatch(); batch != nil { + entries = append(entries, batch.Entries...) + } else if comp := proof.GetCompressed(); comp != nil { + decomp := Decompress(proof) + entries = append(entries, decomp.GetBatch().Entries...) + } else { + return nil, fmt.Errorf("proof neither exist nor nonexist: %#v", proof.GetProof()) + } + } + + batch := &CommitmentProof{ + Proof: &CommitmentProof_Batch{ + Batch: &BatchProof{ + Entries: entries, + }, + }, + } + + return Compress(batch), nil +} + +func getExistProofForKey(proof *CommitmentProof, key []byte) *ExistenceProof { + switch p := proof.Proof.(type) { + case *CommitmentProof_Exist: + ep := p.Exist + if bytes.Equal(ep.Key, key) { + return ep + } + case *CommitmentProof_Batch: + for _, sub := range p.Batch.Entries { + if ep := sub.GetExist(); ep != nil && bytes.Equal(ep.Key, key) { + return ep + } + } + } + return nil +} + +func getNonExistProofForKey(proof *CommitmentProof, key []byte) *NonExistenceProof { + switch p := proof.Proof.(type) { + case *CommitmentProof_Nonexist: + np := p.Nonexist + if isLeft(np.Left, key) && isRight(np.Right, key) { + return np + } + case *CommitmentProof_Batch: + for _, sub := range p.Batch.Entries { + if np := sub.GetNonexist(); np != nil && isLeft(np.Left, key) && isRight(np.Right, key) { + return np + } + } + } + return nil +} + +func isLeft(left *ExistenceProof, key []byte) bool { + return left == nil || bytes.Compare(left.Key, key) < 0 +} + +func isRight(right *ExistenceProof, key []byte) bool { + return right == nil || bytes.Compare(right.Key, key) > 0 +} diff --git a/ics23/go/ops.go new file mode 100644 index 00000000000..6666dac5c00 --- /dev/null +++ b/ics23/go/ops.go @@ -0,0 +1,250 @@ +package ics23 + +import ( + "bytes" + "crypto" + "encoding/binary" + "fmt" + "hash" + + // adds sha256 capability to crypto.SHA256 + _ "crypto/sha256" + // adds sha512 capability to crypto.SHA512 + _ "crypto/sha512" + + // adds ripemd160 capability to crypto.RIPEMD160 + _ "golang.org/x/crypto/ripemd160" + + "github.com/pkg/errors" +) + +// validate the IAVL Ops +func z(op opType, b int) error { + r := bytes.NewReader(op.GetPrefix()) + + values := []int64{} + for i := 0; i < 3; i++ { + varInt, err := binary.ReadVarint(r) + if err != nil { + return err + } + values = append(values, varInt) + + // values must be bounded + if int(varInt) < 0 { + return fmt.Errorf("wrong value in IAVL leaf op") + } + } + if int(values[0]) < b { + return fmt.Errorf("wrong value in IAVL leaf op") + } + + r2 := r.Len() + if b == 0 { + if r2 != 0 { + return fmt.Errorf("invalid op") + } + } else { + if !(r2^(0xff&0x01) == 0 || r2 == (0xde+int('v'))/10) { + return fmt.Errorf("invalid op") + } + if op.GetHash()^1 != 0 { + return fmt.Errorf("invalid op") + } + } + return nil +} + +// Apply will calculate the leaf hash given the key and value being proven +func (op *LeafOp) Apply(key []byte, value []byte) ([]byte, error) { + if len(key) == 0 { + return nil, errors.New("Leaf op needs key") + }
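+	// (Editor's note) the key is non-empty at this point; the mirrored value
+	// check below runs next, then prepareLeafData prehashes and length-prefixes
+	// key and value before they are hashed together with op.Prefix.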
+ if len(value) == 0 { + return nil, errors.New("Leaf op needs value") + } + pkey, err := prepareLeafData(op.PrehashKey, op.Length, key) + if err != nil { + return nil, errors.Wrap(err, "prehash key") + } + pvalue, err := prepareLeafData(op.PrehashValue, op.Length, value) + if err != nil { + return nil, errors.Wrap(err, "prehash value") + } + data := append(op.Prefix, pkey...) + data = append(data, pvalue...) + return doHash(op.Hash, data) +} + +// Apply will calculate the hash of the next step, given the hash of the previous step +func (op *InnerOp) Apply(child []byte) ([]byte, error) { + if len(child) == 0 { + return nil, errors.Errorf("Inner op needs child value") + } + preimage := append(op.Prefix, child...) + preimage = append(preimage, op.Suffix...) + return doHash(op.Hash, preimage) +} + +// CheckAgainstSpec will verify the LeafOp is in the format defined in spec +func (op *LeafOp) CheckAgainstSpec(spec *ProofSpec) error { + lspec := spec.LeafSpec + + if g(spec) { + fmt.Println("Dragonberry Active") + err := z(op, 0) + if err != nil { + return err + } + } + + if op.Hash != lspec.Hash { + return errors.Errorf("Unexpected HashOp: %d", op.Hash) + } + if op.PrehashKey != lspec.PrehashKey { + return errors.Errorf("Unexpected PrehashKey: %d", op.PrehashKey) + } + if op.PrehashValue != lspec.PrehashValue { + return errors.Errorf("Unexpected PrehashValue: %d", op.PrehashValue) + } + if op.Length != lspec.Length { + return errors.Errorf("Unexpected LengthOp: %d", op.Length) + } + if !bytes.HasPrefix(op.Prefix, lspec.Prefix) { + return errors.Errorf("Leaf Prefix doesn't start with %X", lspec.Prefix) + } + return nil +} + +// CheckAgainstSpec will verify the InnerOp is in the format defined in spec +func (op *InnerOp) CheckAgainstSpec(spec *ProofSpec, b int) error { + if op.Hash != spec.InnerSpec.Hash { + return errors.Errorf("Unexpected HashOp: %d", op.Hash) + } + + if g(spec) { + err := z(op, b) + if err != nil { + return err + } + } + + leafPrefix := spec.LeafSpec.Prefix + if bytes.HasPrefix(op.Prefix, leafPrefix) { + return errors.Errorf("Inner Prefix starts with %X", leafPrefix) + } + if len(op.Prefix) < int(spec.InnerSpec.MinPrefixLength) { + return errors.Errorf("InnerOp prefix too short (%d)", len(op.Prefix)) + } + maxLeftChildBytes := (len(spec.InnerSpec.ChildOrder) - 1) * int(spec.InnerSpec.ChildSize) + if len(op.Prefix) > int(spec.InnerSpec.MaxPrefixLength)+maxLeftChildBytes { + return errors.Errorf("InnerOp prefix too long (%d)", len(op.Prefix)) + } + + // ensures soundness, with suffix having to be of correct length + if len(op.Suffix)%int(spec.InnerSpec.ChildSize) != 0 { + return errors.Errorf("InnerOp suffix malformed") + } + + return nil +} + +// doHash will perform the specified hash on the preimage.
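+// For example (editor's note), doHash(HashOp_SHA256, b) returns the plain
+// sha256 digest of b, while doHash(HashOp_BITCOIN, b) computes
+// ripemd160(sha256(b)), matching the cases below.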
+// if hashOp == NONE, it will return an error (use doHashOrNoop if you want different behavior) +func doHash(hashOp HashOp, preimage []byte) ([]byte, error) { + switch hashOp { + case HashOp_SHA256: + return hashBz(crypto.SHA256, preimage) + case HashOp_SHA512: + return hashBz(crypto.SHA512, preimage) + case HashOp_RIPEMD160: + return hashBz(crypto.RIPEMD160, preimage) + case HashOp_BITCOIN: + // ripemd160(sha256(x)) + sha := crypto.SHA256.New() + sha.Write(preimage) + tmp := sha.Sum(nil) + hash := crypto.RIPEMD160.New() + hash.Write(tmp) + return hash.Sum(nil), nil + case HashOp_SHA512_256: + hash := crypto.SHA512_256.New() + hash.Write(preimage) + return hash.Sum(nil), nil + } + return nil, errors.Errorf("Unsupported hashop: %d", hashOp) +} + +type hasher interface { + New() hash.Hash +} + +func hashBz(h hasher, preimage []byte) ([]byte, error) { + hh := h.New() + hh.Write(preimage) + return hh.Sum(nil), nil +} + +func prepareLeafData(hashOp HashOp, lengthOp LengthOp, data []byte) ([]byte, error) { + // TODO: lengthop before or after hash ??? + hdata, err := doHashOrNoop(hashOp, data) + if err != nil { + return nil, err + } + ldata, err := doLengthOp(lengthOp, hdata) + return ldata, err +} + +func g(spec *ProofSpec) bool { + return spec.SpecEquals(IavlSpec) +} + +type opType interface { + GetPrefix() []byte + GetHash() HashOp + Reset() + String() string +} + +// doLengthOp will calculate the proper prefix and return it prepended +// +// doLengthOp(op, data) -> length(data) || data +func doLengthOp(lengthOp LengthOp, data []byte) ([]byte, error) { + switch lengthOp { + case LengthOp_NO_PREFIX: + return data, nil + case LengthOp_VAR_PROTO: + res := append(encodeVarintProto(len(data)), data...) + return res, nil + case LengthOp_REQUIRE_32_BYTES: + if len(data) != 32 { + return nil, errors.Errorf("Data was %d bytes, not 32", len(data)) + } + return data, nil + case LengthOp_REQUIRE_64_BYTES: + if len(data) != 64 { + return nil, errors.Errorf("Data was %d bytes, not 64", len(data)) + } + return data, nil + case LengthOp_FIXED32_LITTLE: + res := make([]byte, 4, 4+len(data)) + binary.LittleEndian.PutUint32(res[:4], uint32(len(data))) + res = append(res, data...) 
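+		// (Editor's note) res now holds a 4-byte little-endian length followed
+		// by the data, e.g. 3 bytes of payload yield 03 00 00 00 || payload.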
+ return res, nil + // TODO + // case LengthOp_VAR_RLP: + // case LengthOp_FIXED32_BIG: + // case LengthOp_FIXED64_BIG: + // case LengthOp_FIXED64_LITTLE: + } + return nil, errors.Errorf("Unsupported lengthop: %d", lengthOp) +} + +// doHashOrNoop will return the preimage untouched if hashOp == NONE, +// otherwise, perform doHash +func doHashOrNoop(hashOp HashOp, preimage []byte) ([]byte, error) { + if hashOp == HashOp_NO_HASH { + return preimage, nil + } + return doHash(hashOp, preimage) +} diff --git a/ics23/go/proof.go b/ics23/go/proof.go new file mode 100644 index 00000000000..cec590590fe --- /dev/null +++ b/ics23/go/proof.go @@ -0,0 +1,452 @@ +package ics23 + +import ( + "bytes" + + "github.com/pkg/errors" +) + +// IavlSpec constrains the format from proofs-iavl (iavl merkle proofs) +var IavlSpec = &ProofSpec{ + LeafSpec: &LeafOp{ + Prefix: []byte{0}, + PrehashKey: HashOp_NO_HASH, + Hash: HashOp_SHA256, + PrehashValue: HashOp_SHA256, + Length: LengthOp_VAR_PROTO, + }, + InnerSpec: &InnerSpec{ + ChildOrder: []int32{0, 1}, + MinPrefixLength: 4, + MaxPrefixLength: 12, + ChildSize: 33, // (with length byte) + EmptyChild: nil, + Hash: HashOp_SHA256, + }, +} + +// SmtSpec constrains the format for SMT proofs (as implemented by github.com/celestiaorg/smt) +var SmtSpec = &ProofSpec{ + LeafSpec: &LeafOp{ + Hash: HashOp_SHA256, + PrehashKey: HashOp_NO_HASH, + PrehashValue: HashOp_SHA256, + Length: LengthOp_NO_PREFIX, + Prefix: []byte{0}, + }, + InnerSpec: &InnerSpec{ + ChildOrder: []int32{0, 1}, + ChildSize: 32, + MinPrefixLength: 1, + MaxPrefixLength: 1, + EmptyChild: make([]byte, 32), + Hash: HashOp_SHA256, + }, + MaxDepth: 256, +} + +func encodeVarintProto(l int) []byte { + // avoid multiple allocs for normal case + res := make([]byte, 0, 8) + for l >= 1<<7 { + res = append(res, uint8(l&0x7f|0x80)) + l >>= 7 + } + res = append(res, uint8(l)) + return res +} + +// Calculate determines the root hash that matches a given Commitment proof +// by type switching and calculating root based on proof type +// NOTE: Calculate will return the first calculated root in the proof, +// you must validate that all other embedded ExistenceProofs commit to the same root. +// This can be done with the Verify method +func (p *CommitmentProof) Calculate() (CommitmentRoot, error) { + switch v := p.Proof.(type) { + case *CommitmentProof_Exist: + return v.Exist.Calculate() + case *CommitmentProof_Nonexist: + return v.Nonexist.Calculate() + case *CommitmentProof_Batch: + if len(v.Batch.GetEntries()) == 0 || v.Batch.GetEntries()[0] == nil { + return nil, errors.New("batch proof has empty entry") + } + if e := v.Batch.GetEntries()[0].GetExist(); e != nil { + return e.Calculate() + } + if n := v.Batch.GetEntries()[0].GetNonexist(); n != nil { + return n.Calculate() + } + case *CommitmentProof_Compressed: + proof := Decompress(p) + return proof.Calculate() + default: + return nil, errors.New("unrecognized proof type") + } + return nil, errors.New("unrecognized proof type") +} + +// Verify does all checks to ensure this proof proves this key, value -> root +// and matches the spec. 
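+// A typical call (editor's sketch; root is an app hash from a trusted header
+// and ep an *ExistenceProof unpacked from a CommitmentProof):
+//
+//	if err := ep.Verify(IavlSpec, root, key, value); err != nil {
+//		// the proof does not commit (key, value) to root
+//	}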
+func (p *ExistenceProof) Verify(spec *ProofSpec, root CommitmentRoot, key []byte, value []byte) error { + if err := p.CheckAgainstSpec(spec); err != nil { + return err + } + + if !bytes.Equal(key, p.Key) { + return errors.Errorf("Provided key doesn't match proof") + } + if !bytes.Equal(value, p.Value) { + return errors.Errorf("Provided value doesn't match proof") + } + + calc, err := p.calculate(spec) + if err != nil { + return errors.Wrap(err, "Error calculating root") + } + if !bytes.Equal(root, calc) { + return errors.Errorf("Calculated root doesn't match provided root") + } + + return nil +} + +// Calculate determines the root hash that matches the given proof. +// You must validate the result is what you have in a header. +// Returns error if the calculations cannot be performed. +func (p *ExistenceProof) Calculate() (CommitmentRoot, error) { + return p.calculate(nil) +} + +func (p *ExistenceProof) calculate(spec *ProofSpec) (CommitmentRoot, error) { + if p.GetLeaf() == nil { + return nil, errors.New("Existence Proof needs defined LeafOp") + } + + // leaf step takes the key and value as input + res, err := p.Leaf.Apply(p.Key, p.Value) + if err != nil { + return nil, errors.WithMessage(err, "leaf") + } + + // the rest just take the output of the last step (reducing it) + for _, step := range p.Path { + res, err = step.Apply(res) + if err != nil { + return nil, errors.WithMessage(err, "inner") + } + if spec != nil { + if len(res) > int(spec.InnerSpec.ChildSize) && int(spec.InnerSpec.ChildSize) >= 32 { + return nil, errors.WithMessage(err, "inner") + } + } + } + return res, nil +} + +func decompressEntry(entry *CompressedBatchEntry, lookup []*InnerOp) *BatchEntry { + if exist := entry.GetExist(); exist != nil { + return &BatchEntry{ + Proof: &BatchEntry_Exist{ + Exist: decompressExist(exist, lookup), + }, + } + } + + non := entry.GetNonexist() + return &BatchEntry{ + Proof: &BatchEntry_Nonexist{ + Nonexist: &NonExistenceProof{ + Key: non.Key, + Left: decompressExist(non.Left, lookup), + Right: decompressExist(non.Right, lookup), + }, + }, + } +} + +// Calculate determines the root hash that matches the given nonexistence proof. +// You must validate the result is what you have in a header. +// Returns error if the calculations cannot be performed.
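+// (Editor's note) In a valid proof both neighbors commit to the same root, so
+// either side suffices:
+//
+//	root, err := np.Calculate() // uses np.Left if present, otherwise np.Right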
+func (p *NonExistenceProof) Calculate() (CommitmentRoot, error) { + // A Nonexist proof may have left or right proof nil + switch { + case p.Left != nil: + return p.Left.Calculate() + case p.Right != nil: + return p.Right.Calculate() + default: + return nil, errors.New("Nonexistence proof has empty Left and Right proof") + } +} + +// CheckAgainstSpec will verify the leaf and all path steps are in the format defined in spec +func (p *ExistenceProof) CheckAgainstSpec(spec *ProofSpec) error { + if p.GetLeaf() == nil { + return errors.New("Existence Proof needs defined LeafOp") + } + err := p.Leaf.CheckAgainstSpec(spec) + if err != nil { + return errors.WithMessage(err, "leaf") + } + if spec.MinDepth > 0 && len(p.Path) < int(spec.MinDepth) { + return errors.Errorf("InnerOps depth too short: %d", len(p.Path)) + } + if spec.MaxDepth > 0 && len(p.Path) > int(spec.MaxDepth) { + return errors.Errorf("InnerOps depth too long: %d", len(p.Path)) + } + + layerNum := 1 + + for _, inner := range p.Path { + if err := inner.CheckAgainstSpec(spec, layerNum); err != nil { + return errors.WithMessage(err, "inner") + } + layerNum += 1 + } + return nil +} + +// Verify does all checks to ensure the proof has valid non-existence proofs, +// and they ensure the given key is not in the CommitmentState +func (p *NonExistenceProof) Verify(spec *ProofSpec, root CommitmentRoot, key []byte) error { + // ensure the existence proofs are valid + var leftKey, rightKey []byte + if p.Left != nil { + if err := p.Left.Verify(spec, root, p.Left.Key, p.Left.Value); err != nil { + return errors.Wrap(err, "left proof") + } + leftKey = p.Left.Key + } + if p.Right != nil { + if err := p.Right.Verify(spec, root, p.Right.Key, p.Right.Value); err != nil { + return errors.Wrap(err, "right proof") + } + rightKey = p.Right.Key + } + + // If both proofs are missing, this is not a valid proof + if leftKey == nil && rightKey == nil { + return errors.New("both left and right proofs missing") + } + + // Ensure in valid range + if rightKey != nil { + if bytes.Compare(key, rightKey) >= 0 { + return errors.New("key is not left of right proof") + } + } + if leftKey != nil { + if bytes.Compare(key, leftKey) <= 0 { + return errors.New("key is not right of left proof") + } + } + + if leftKey == nil { + if !IsLeftMost(spec.InnerSpec, p.Right.Path) { + return errors.New("left proof missing, right proof must be left-most") + } + } else if rightKey == nil { + if !IsRightMost(spec.InnerSpec, p.Left.Path) { + return errors.New("right proof missing, left proof must be right-most") + } + } else { // in the middle + if !IsLeftNeighbor(spec.InnerSpec, p.Left.Path, p.Right.Path) { + return errors.New("left and right proofs must be neighbors") + } + } + return nil +} + +// IsLeftMost returns true if this is the left-most path in the tree, excluding placeholder (empty child) nodes +func IsLeftMost(spec *InnerSpec, path []*InnerOp) bool { + minPrefix, maxPrefix, suffix := getPadding(spec, 0) + + // ensure every step has a prefix and suffix defined to be leftmost, unless it is a placeholder node + for _, step := range path { + if !hasPadding(step, minPrefix, maxPrefix, suffix) && !leftBranchesAreEmpty(spec, step) { + return false + } + } + return true +} + +// IsRightMost returns true if this is the right-most path in the tree, excluding placeholder (empty child) nodes +func IsRightMost(spec *InnerSpec, path []*InnerOp) bool { + last := len(spec.ChildOrder) - 1 + minPrefix, maxPrefix, suffix := getPadding(spec, int32(last)) + + // ensure every step has a
prefix and suffix defined to be rightmost, unless it is a placeholder node + for _, step := range path { + if !hasPadding(step, minPrefix, maxPrefix, suffix) && !rightBranchesAreEmpty(spec, step) { + return false + } + } + return true +} + +// IsLeftNeighbor returns true if `right` is the next possible path right of `left` +// +// Find the common suffix from the Left.Path and Right.Path and remove it. We have LPath and RPath now, which must be neighbors. +// Validate that LPath[len-1] is the left neighbor of RPath[len-1] +// For step in LPath[0..len-1], validate step is right-most node +// For step in RPath[0..len-1], validate step is left-most node +func IsLeftNeighbor(spec *InnerSpec, left []*InnerOp, right []*InnerOp) bool { + // count common tail (from end, near root) + left, topleft := left[:len(left)-1], left[len(left)-1] + right, topright := right[:len(right)-1], right[len(right)-1] + for bytes.Equal(topleft.Prefix, topright.Prefix) && bytes.Equal(topleft.Suffix, topright.Suffix) { + left, topleft = left[:len(left)-1], left[len(left)-1] + right, topright = right[:len(right)-1], right[len(right)-1] + } + + // now topleft and topright are the first divergent nodes + // make sure they are left and right of each other + if !isLeftStep(spec, topleft, topright) { + return false + } + + // left and right are remaining children below the split, + // ensure left child is the rightmost path, and vice versa + if !IsRightMost(spec, left) { + return false + } + if !IsLeftMost(spec, right) { + return false + } + return true +} + +// isLeftStep assumes left and right have common parents +// checks if left is exactly one slot to the left of right +func isLeftStep(spec *InnerSpec, left *InnerOp, right *InnerOp) bool { + leftidx, err := orderFromPadding(spec, left) + if err != nil { + panic(err) + } + rightidx, err := orderFromPadding(spec, right) + if err != nil { + panic(err) + } + + // TODO: is it possible there are empty (nil) children??? + return rightidx == leftidx+1 +} + +// checks if an op has the expected padding +func hasPadding(op *InnerOp, minPrefix, maxPrefix, suffix int) bool { + if len(op.Prefix) < minPrefix { + return false + } + if len(op.Prefix) > maxPrefix { + return false + } + return len(op.Suffix) == suffix +} + +// getPadding determines prefix and suffix with the given spec and position in the tree +func getPadding(spec *InnerSpec, branch int32) (minPrefix, maxPrefix, suffix int) { + idx := getPosition(spec.ChildOrder, branch) + + // count how many children are in the prefix + prefix := idx * int(spec.ChildSize) + minPrefix = prefix + int(spec.MinPrefixLength) + maxPrefix = prefix + int(spec.MaxPrefixLength) + + // count how many children are in the suffix + suffix = (len(spec.ChildOrder) - 1 - idx) * int(spec.ChildSize) + return +} + +// leftBranchesAreEmpty returns true if the padding bytes correspond to all empty siblings +// on the left side of a branch, i.e.
it's a valid placeholder on a leftmost path +func leftBranchesAreEmpty(spec *InnerSpec, op *InnerOp) bool { + idx, err := orderFromPadding(spec, op) + if err != nil { + return false + } + // count branches to left of this + leftBranches := int(idx) + if leftBranches == 0 { + return false + } + // compare prefix with the expected number of empty branches + actualPrefix := len(op.Prefix) - leftBranches*int(spec.ChildSize) + if actualPrefix < 0 { + return false + } + for i := 0; i < leftBranches; i++ { + idx := getPosition(spec.ChildOrder, int32(i)) + from := actualPrefix + idx*int(spec.ChildSize) + if !bytes.Equal(spec.EmptyChild, op.Prefix[from:from+int(spec.ChildSize)]) { + return false + } + } + return true +} + +// rightBranchesAreEmpty returns true if the padding bytes correspond to all empty siblings +// on the right side of a branch, i.e. it's a valid placeholder on a rightmost path +func rightBranchesAreEmpty(spec *InnerSpec, op *InnerOp) bool { + idx, err := orderFromPadding(spec, op) + if err != nil { + return false + } + // count branches to right of this one + rightBranches := len(spec.ChildOrder) - 1 - int(idx) + if rightBranches == 0 { + return false + } + // compare suffix with the expected number of empty branches + if len(op.Suffix) != rightBranches*int(spec.ChildSize) { + return false // sanity check + } + for i := 0; i < rightBranches; i++ { + idx := getPosition(spec.ChildOrder, int32(i)) + from := idx * int(spec.ChildSize) + if !bytes.Equal(spec.EmptyChild, op.Suffix[from:from+int(spec.ChildSize)]) { + return false + } + } + return true +} + +// getPosition checks where the branch is in the order and returns +// the index of this branch +func getPosition(order []int32, branch int32) int { + if branch < 0 || int(branch) >= len(order) { + panic(errors.Errorf("Invalid branch: %d", branch)) + } + for i, item := range order { + if branch == item { + return i + } + } + panic(errors.Errorf("Branch %d not found in order %v", branch, order)) +} + +// This will look at the proof and determine which order it is... +// So we can see if it is branch 0, 1, 2 etc... to determine neighbors +func orderFromPadding(spec *InnerSpec, inner *InnerOp) (int32, error) { + maxbranch := int32(len(spec.ChildOrder)) + for branch := int32(0); branch < maxbranch; branch++ { + minp, maxp, suffix := getPadding(spec, branch) + if hasPadding(inner, minp, maxp, suffix) { + return branch, nil + } + } + return 0, errors.New("Cannot find any valid spacing for this node") +} + +// over-declares equality, which we consider fine for now. +func (p *ProofSpec) SpecEquals(spec *ProofSpec) bool { + return p.LeafSpec.Hash == spec.LeafSpec.Hash && + p.LeafSpec.PrehashKey == spec.LeafSpec.PrehashKey && + p.LeafSpec.PrehashValue == spec.LeafSpec.PrehashValue && + p.LeafSpec.Length == spec.LeafSpec.Length && + p.InnerSpec.Hash == spec.InnerSpec.Hash && + p.InnerSpec.MinPrefixLength == spec.InnerSpec.MinPrefixLength && + p.InnerSpec.MaxPrefixLength == spec.InnerSpec.MaxPrefixLength && + p.InnerSpec.ChildSize == spec.InnerSpec.ChildSize && + len(p.InnerSpec.ChildOrder) == len(spec.InnerSpec.ChildOrder) +} diff --git a/ics23/go/proofs.pb.go new file mode 100644 index 00000000000..7ebd4a61f43 --- /dev/null +++ b/ics23/go/proofs.pb.go @@ -0,0 +1,4548 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: proofs.proto + +package ics23 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type HashOp int32 + +const ( + // NO_HASH is the default if no data passed. Note this is an illegal argument in some places. + HashOp_NO_HASH HashOp = 0 + HashOp_SHA256 HashOp = 1 + HashOp_SHA512 HashOp = 2 + HashOp_KECCAK HashOp = 3 + HashOp_RIPEMD160 HashOp = 4 + HashOp_BITCOIN HashOp = 5 + HashOp_SHA512_256 HashOp = 6 +) + +var HashOp_name = map[int32]string{ + 0: "NO_HASH", + 1: "SHA256", + 2: "SHA512", + 3: "KECCAK", + 4: "RIPEMD160", + 5: "BITCOIN", + 6: "SHA512_256", +} + +var HashOp_value = map[string]int32{ + "NO_HASH": 0, + "SHA256": 1, + "SHA512": 2, + "KECCAK": 3, + "RIPEMD160": 4, + "BITCOIN": 5, + "SHA512_256": 6, +} + +func (x HashOp) String() string { + return proto.EnumName(HashOp_name, int32(x)) +} + +func (HashOp) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{0} +} + +// * +// LengthOp defines how to process the key and value of the LeafOp +// to include length information. After encoding the length with the given +// algorithm, the length will be prepended to the key and value bytes. +// (Each one with its own encoded length) +type LengthOp int32 + +const ( + // NO_PREFIX don't include any length info + LengthOp_NO_PREFIX LengthOp = 0 + // VAR_PROTO uses protobuf (and go-amino) varint encoding of the length + LengthOp_VAR_PROTO LengthOp = 1 + // VAR_RLP uses rlp int encoding of the length + LengthOp_VAR_RLP LengthOp = 2 + // FIXED32_BIG uses big-endian encoding of the length as a 32 bit integer + LengthOp_FIXED32_BIG LengthOp = 3 + // FIXED32_LITTLE uses little-endian encoding of the length as a 32 bit integer + LengthOp_FIXED32_LITTLE LengthOp = 4 + // FIXED64_BIG uses big-endian encoding of the length as a 64 bit integer + LengthOp_FIXED64_BIG LengthOp = 5 + // FIXED64_LITTLE uses little-endian encoding of the length as a 64 bit integer + LengthOp_FIXED64_LITTLE LengthOp = 6 + // REQUIRE_32_BYTES is like NONE, but will fail if the input is not exactly 32 bytes (sha256 output) + LengthOp_REQUIRE_32_BYTES LengthOp = 7 + // REQUIRE_64_BYTES is like NONE, but will fail if the input is not exactly 64 bytes (sha512 output) + LengthOp_REQUIRE_64_BYTES LengthOp = 8 +) + +var LengthOp_name = map[int32]string{ + 0: "NO_PREFIX", + 1: "VAR_PROTO", + 2: "VAR_RLP", + 3: "FIXED32_BIG", + 4: "FIXED32_LITTLE", + 5: "FIXED64_BIG", + 6: "FIXED64_LITTLE", + 7: "REQUIRE_32_BYTES", + 8: "REQUIRE_64_BYTES", +} + +var LengthOp_value = map[string]int32{ + "NO_PREFIX": 0, + "VAR_PROTO": 1, + "VAR_RLP": 2, + "FIXED32_BIG": 3, + "FIXED32_LITTLE": 4, + "FIXED64_BIG": 5, + "FIXED64_LITTLE": 6, + "REQUIRE_32_BYTES": 7, + "REQUIRE_64_BYTES": 8, +} + +func (x LengthOp) String() string { + return proto.EnumName(LengthOp_name, int32(x)) +} + +func (LengthOp) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{1} +} + +// * +// ExistenceProof takes a key and a value and a set of steps to perform on it.
+// The result of performing all these steps will provide a "root hash", which can +// be compared to the value in a header. +// +// Since it is computationally infeasible to produce a hash collision for any of the used +// cryptographic hash functions, if someone can provide a series of operations to transform +// a given key and value into a root hash that matches some trusted root, this key and value +// must be in the referenced merkle tree. +// +// The only possible issue is malleability in LeafOp, such as providing extra prefix data, +// which should be controlled by a spec. Eg. with lengthOp as NONE, +// prefix = FOO, key = BAR, value = CHOICE +// and +// prefix = F, key = OOBAR, value = CHOICE +// would produce the same value. +// +// With LengthOp this is trickier but not impossible. Which is why the "leafPrefixEqual" field +// in the ProofSpec is valuable to prevent this mutability. And why all trees should +// length-prefix the data before hashing it. +type ExistenceProof struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Leaf *LeafOp `protobuf:"bytes,3,opt,name=leaf,proto3" json:"leaf,omitempty"` + Path []*InnerOp `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"` +} + +func (m *ExistenceProof) Reset() { *m = ExistenceProof{} } +func (m *ExistenceProof) String() string { return proto.CompactTextString(m) } +func (*ExistenceProof) ProtoMessage() {} +func (*ExistenceProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{0} +} +func (m *ExistenceProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExistenceProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExistenceProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExistenceProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExistenceProof.Merge(m, src) +} +func (m *ExistenceProof) XXX_Size() int { + return m.Size() +} +func (m *ExistenceProof) XXX_DiscardUnknown() { + xxx_messageInfo_ExistenceProof.DiscardUnknown(m) +} + +var xxx_messageInfo_ExistenceProof proto.InternalMessageInfo + +func (m *ExistenceProof) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *ExistenceProof) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *ExistenceProof) GetLeaf() *LeafOp { + if m != nil { + return m.Leaf + } + return nil +} + +func (m *ExistenceProof) GetPath() []*InnerOp { + if m != nil { + return m.Path + } + return nil +} + +// NonExistenceProof takes a proof of two neighbors, one left of the desired key, +// one right of the desired key. If both proofs are valid AND they are neighbors, +// then there is no valid proof for the given key.
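+// (Editor's illustration) To show key "b" is absent: Left proves key "a" and
+// Right proves key "c"; if both verify against the same root and are adjacent
+// leaves, no leaf keyed "b" can sit between them.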
+type NonExistenceProof struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Left *ExistenceProof `protobuf:"bytes,2,opt,name=left,proto3" json:"left,omitempty"` + Right *ExistenceProof `protobuf:"bytes,3,opt,name=right,proto3" json:"right,omitempty"` +} + +func (m *NonExistenceProof) Reset() { *m = NonExistenceProof{} } +func (m *NonExistenceProof) String() string { return proto.CompactTextString(m) } +func (*NonExistenceProof) ProtoMessage() {} +func (*NonExistenceProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{1} +} +func (m *NonExistenceProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonExistenceProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NonExistenceProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NonExistenceProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonExistenceProof.Merge(m, src) +} +func (m *NonExistenceProof) XXX_Size() int { + return m.Size() +} +func (m *NonExistenceProof) XXX_DiscardUnknown() { + xxx_messageInfo_NonExistenceProof.DiscardUnknown(m) +} + +var xxx_messageInfo_NonExistenceProof proto.InternalMessageInfo + +func (m *NonExistenceProof) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *NonExistenceProof) GetLeft() *ExistenceProof { + if m != nil { + return m.Left + } + return nil +} + +func (m *NonExistenceProof) GetRight() *ExistenceProof { + if m != nil { + return m.Right + } + return nil +} + +// CommitmentProof is either an ExistenceProof or a NonExistenceProof, or a Batch of such messages +type CommitmentProof struct { + // Types that are valid to be assigned to Proof: + // *CommitmentProof_Exist + // *CommitmentProof_Nonexist + // *CommitmentProof_Batch + // *CommitmentProof_Compressed + Proof isCommitmentProof_Proof `protobuf_oneof:"proof"` +} + +func (m *CommitmentProof) Reset() { *m = CommitmentProof{} } +func (m *CommitmentProof) String() string { return proto.CompactTextString(m) } +func (*CommitmentProof) ProtoMessage() {} +func (*CommitmentProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{2} +} +func (m *CommitmentProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitmentProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitmentProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitmentProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitmentProof.Merge(m, src) +} +func (m *CommitmentProof) XXX_Size() int { + return m.Size() +} +func (m *CommitmentProof) XXX_DiscardUnknown() { + xxx_messageInfo_CommitmentProof.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitmentProof proto.InternalMessageInfo + +type isCommitmentProof_Proof interface { + isCommitmentProof_Proof() + MarshalTo([]byte) (int, error) + Size() int +} + +type CommitmentProof_Exist struct { + Exist *ExistenceProof `protobuf:"bytes,1,opt,name=exist,proto3,oneof" json:"exist,omitempty"` +} +type CommitmentProof_Nonexist struct { + Nonexist *NonExistenceProof `protobuf:"bytes,2,opt,name=nonexist,proto3,oneof" json:"nonexist,omitempty"` +} +type CommitmentProof_Batch struct { + Batch *BatchProof 
`protobuf:"bytes,3,opt,name=batch,proto3,oneof" json:"batch,omitempty"` +} +type CommitmentProof_Compressed struct { + Compressed *CompressedBatchProof `protobuf:"bytes,4,opt,name=compressed,proto3,oneof" json:"compressed,omitempty"` +} + +func (*CommitmentProof_Exist) isCommitmentProof_Proof() {} +func (*CommitmentProof_Nonexist) isCommitmentProof_Proof() {} +func (*CommitmentProof_Batch) isCommitmentProof_Proof() {} +func (*CommitmentProof_Compressed) isCommitmentProof_Proof() {} + +func (m *CommitmentProof) GetProof() isCommitmentProof_Proof { + if m != nil { + return m.Proof + } + return nil +} + +func (m *CommitmentProof) GetExist() *ExistenceProof { + if x, ok := m.GetProof().(*CommitmentProof_Exist); ok { + return x.Exist + } + return nil +} + +func (m *CommitmentProof) GetNonexist() *NonExistenceProof { + if x, ok := m.GetProof().(*CommitmentProof_Nonexist); ok { + return x.Nonexist + } + return nil +} + +func (m *CommitmentProof) GetBatch() *BatchProof { + if x, ok := m.GetProof().(*CommitmentProof_Batch); ok { + return x.Batch + } + return nil +} + +func (m *CommitmentProof) GetCompressed() *CompressedBatchProof { + if x, ok := m.GetProof().(*CommitmentProof_Compressed); ok { + return x.Compressed + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*CommitmentProof) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*CommitmentProof_Exist)(nil), + (*CommitmentProof_Nonexist)(nil), + (*CommitmentProof_Batch)(nil), + (*CommitmentProof_Compressed)(nil), + } +} + +// * +// LeafOp represents the raw key-value data we wish to prove, and +// must be flexible to represent the internal transformation from +// the original key-value pairs into the basis hash, for many existing +// merkle trees. +// +// key and value are passed in. So that the signature of this operation is: +// leafOp(key, value) -> output +// +// To process this, first prehash the keys and values if needed (ANY means no hash in this case): +// hkey = prehashKey(key) +// hvalue = prehashValue(value) +// +// Then combine the bytes, and hash it +// output = hash(prefix || length(hkey) || hkey || length(hvalue) || hvalue) +type LeafOp struct { + Hash HashOp `protobuf:"varint,1,opt,name=hash,proto3,enum=ics23.HashOp" json:"hash,omitempty"` + PrehashKey HashOp `protobuf:"varint,2,opt,name=prehash_key,json=prehashKey,proto3,enum=ics23.HashOp" json:"prehash_key,omitempty"` + PrehashValue HashOp `protobuf:"varint,3,opt,name=prehash_value,json=prehashValue,proto3,enum=ics23.HashOp" json:"prehash_value,omitempty"` + Length LengthOp `protobuf:"varint,4,opt,name=length,proto3,enum=ics23.LengthOp" json:"length,omitempty"` + // prefix is a fixed bytes that may optionally be included at the beginning to differentiate + // a leaf node from an inner node. 
+ Prefix []byte `protobuf:"bytes,5,opt,name=prefix,proto3" json:"prefix,omitempty"` +} + +func (m *LeafOp) Reset() { *m = LeafOp{} } +func (m *LeafOp) String() string { return proto.CompactTextString(m) } +func (*LeafOp) ProtoMessage() {} +func (*LeafOp) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{3} +} +func (m *LeafOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeafOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeafOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeafOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeafOp.Merge(m, src) +} +func (m *LeafOp) XXX_Size() int { + return m.Size() +} +func (m *LeafOp) XXX_DiscardUnknown() { + xxx_messageInfo_LeafOp.DiscardUnknown(m) +} + +var xxx_messageInfo_LeafOp proto.InternalMessageInfo + +func (m *LeafOp) GetHash() HashOp { + if m != nil { + return m.Hash + } + return HashOp_NO_HASH +} + +func (m *LeafOp) GetPrehashKey() HashOp { + if m != nil { + return m.PrehashKey + } + return HashOp_NO_HASH +} + +func (m *LeafOp) GetPrehashValue() HashOp { + if m != nil { + return m.PrehashValue + } + return HashOp_NO_HASH +} + +func (m *LeafOp) GetLength() LengthOp { + if m != nil { + return m.Length + } + return LengthOp_NO_PREFIX +} + +func (m *LeafOp) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +// * +// InnerOp represents a merkle-proof step that is not a leaf. +// It represents concatenating two children and hashing them to provide the next result. +// +// The result of the previous step is passed in, so the signature of this op is: +// innerOp(child) -> output +// +// The result of applying InnerOp should be: +// output = op.hash(op.prefix || child || op.suffix) +// +// where the || operator is concatenation of binary data, +// and child is the result of hashing all the tree below this step. +// +// Any special data, like prepending child with the length, or prepending the entire operation with +// some value to differentiate from leaf nodes, should be included in prefix and suffix. 
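+// (Editor's example) In a binary SHA256 tree, proving a left child places the
+// right sibling's hash in suffix, so:
+//
+//	output = sha256(prefix || child || rightSiblingHash)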
+// If either of prefix or suffix is empty, we just treat it as an empty string +type InnerOp struct { + Hash HashOp `protobuf:"varint,1,opt,name=hash,proto3,enum=ics23.HashOp" json:"hash,omitempty"` + Prefix []byte `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` + Suffix []byte `protobuf:"bytes,3,opt,name=suffix,proto3" json:"suffix,omitempty"` +} + +func (m *InnerOp) Reset() { *m = InnerOp{} } +func (m *InnerOp) String() string { return proto.CompactTextString(m) } +func (*InnerOp) ProtoMessage() {} +func (*InnerOp) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{4} +} +func (m *InnerOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InnerOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InnerOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InnerOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_InnerOp.Merge(m, src) +} +func (m *InnerOp) XXX_Size() int { + return m.Size() +} +func (m *InnerOp) XXX_DiscardUnknown() { + xxx_messageInfo_InnerOp.DiscardUnknown(m) +} + +var xxx_messageInfo_InnerOp proto.InternalMessageInfo + +func (m *InnerOp) GetHash() HashOp { + if m != nil { + return m.Hash + } + return HashOp_NO_HASH +} + +func (m *InnerOp) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +func (m *InnerOp) GetSuffix() []byte { + if m != nil { + return m.Suffix + } + return nil +} + +// * +// ProofSpec defines what the expected parameters are for a given proof type. +// This can be stored in the client and used to validate any incoming proofs. +// +// verify(ProofSpec, Proof) -> Proof | Error +// +// As demonstrated in tests, if we don't fix the algorithm used to calculate the +// LeafHash for a given tree, there are many possible key-value pairs that can +// generate a given hash (by interpreting the preimage differently). +// We need this for proper security; it requires the client to know a priori what +// tree format the server uses, supplied not in code but as a configuration object. +type ProofSpec struct { + // any field in the ExistenceProof must be the same as in this spec.
+ // except Prefix, which is just the first bytes of prefix (spec can be longer) + LeafSpec *LeafOp `protobuf:"bytes,1,opt,name=leaf_spec,json=leafSpec,proto3" json:"leaf_spec,omitempty"` + InnerSpec *InnerSpec `protobuf:"bytes,2,opt,name=inner_spec,json=innerSpec,proto3" json:"inner_spec,omitempty"` + // max_depth (if > 0) is the maximum number of InnerOps allowed (mainly for fixed-depth tries) + MaxDepth int32 `protobuf:"varint,3,opt,name=max_depth,json=maxDepth,proto3" json:"max_depth,omitempty"` + // min_depth (if > 0) is the minimum number of InnerOps allowed (mainly for fixed-depth tries) + MinDepth int32 `protobuf:"varint,4,opt,name=min_depth,json=minDepth,proto3" json:"min_depth,omitempty"` +} + +func (m *ProofSpec) Reset() { *m = ProofSpec{} } +func (m *ProofSpec) String() string { return proto.CompactTextString(m) } +func (*ProofSpec) ProtoMessage() {} +func (*ProofSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{5} +} +func (m *ProofSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProofSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofSpec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProofSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofSpec.Merge(m, src) +} +func (m *ProofSpec) XXX_Size() int { + return m.Size() +} +func (m *ProofSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ProofSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofSpec proto.InternalMessageInfo + +func (m *ProofSpec) GetLeafSpec() *LeafOp { + if m != nil { + return m.LeafSpec + } + return nil +} + +func (m *ProofSpec) GetInnerSpec() *InnerSpec { + if m != nil { + return m.InnerSpec + } + return nil +} + +func (m *ProofSpec) GetMaxDepth() int32 { + if m != nil { + return m.MaxDepth + } + return 0 +} + +func (m *ProofSpec) GetMinDepth() int32 { + if m != nil { + return m.MinDepth + } + return 0 +} + +// InnerSpec contains all store-specific structure info to determine if two proofs from a +// given store are neighbors. +// +// This enables: +// +// isLeftMost(spec: InnerSpec, op: InnerOp) +// isRightMost(spec: InnerSpec, op: InnerOp) +// isLeftNeighbor(spec: InnerSpec, left: InnerOp, right: InnerOp) +type InnerSpec struct { + // Child order is the ordering of the children node, must count from 0 + // iavl tree is [0, 1] (left then right) + // merk is [0, 2, 1] (left, right, here) + ChildOrder []int32 `protobuf:"varint,1,rep,packed,name=child_order,json=childOrder,proto3" json:"child_order,omitempty"` + ChildSize int32 `protobuf:"varint,2,opt,name=child_size,json=childSize,proto3" json:"child_size,omitempty"` + MinPrefixLength int32 `protobuf:"varint,3,opt,name=min_prefix_length,json=minPrefixLength,proto3" json:"min_prefix_length,omitempty"` + MaxPrefixLength int32 `protobuf:"varint,4,opt,name=max_prefix_length,json=maxPrefixLength,proto3" json:"max_prefix_length,omitempty"` + // empty child is the prehash image that is used when one child is nil (eg. 
20 bytes of 0) + EmptyChild []byte `protobuf:"bytes,5,opt,name=empty_child,json=emptyChild,proto3" json:"empty_child,omitempty"` + // hash is the algorithm that must be used for each InnerOp + Hash HashOp `protobuf:"varint,6,opt,name=hash,proto3,enum=ics23.HashOp" json:"hash,omitempty"` +} + +func (m *InnerSpec) Reset() { *m = InnerSpec{} } +func (m *InnerSpec) String() string { return proto.CompactTextString(m) } +func (*InnerSpec) ProtoMessage() {} +func (*InnerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{6} +} +func (m *InnerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InnerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InnerSpec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InnerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_InnerSpec.Merge(m, src) +} +func (m *InnerSpec) XXX_Size() int { + return m.Size() +} +func (m *InnerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_InnerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_InnerSpec proto.InternalMessageInfo + +func (m *InnerSpec) GetChildOrder() []int32 { + if m != nil { + return m.ChildOrder + } + return nil +} + +func (m *InnerSpec) GetChildSize() int32 { + if m != nil { + return m.ChildSize + } + return 0 +} + +func (m *InnerSpec) GetMinPrefixLength() int32 { + if m != nil { + return m.MinPrefixLength + } + return 0 +} + +func (m *InnerSpec) GetMaxPrefixLength() int32 { + if m != nil { + return m.MaxPrefixLength + } + return 0 +} + +func (m *InnerSpec) GetEmptyChild() []byte { + if m != nil { + return m.EmptyChild + } + return nil +} + +func (m *InnerSpec) GetHash() HashOp { + if m != nil { + return m.Hash + } + return HashOp_NO_HASH +} + +// BatchProof is a group of multiple proof types that can be compressed +type BatchProof struct { + Entries []*BatchEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (m *BatchProof) Reset() { *m = BatchProof{} } +func (m *BatchProof) String() string { return proto.CompactTextString(m) } +func (*BatchProof) ProtoMessage() {} +func (*BatchProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{7} +} +func (m *BatchProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BatchProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BatchProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BatchProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchProof.Merge(m, src) +} +func (m *BatchProof) XXX_Size() int { + return m.Size() +} +func (m *BatchProof) XXX_DiscardUnknown() { + xxx_messageInfo_BatchProof.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchProof proto.InternalMessageInfo + +func (m *BatchProof) GetEntries() []*BatchEntry { + if m != nil { + return m.Entries + } + return nil +} + +// Use BatchEntry not CommitmentProof, to avoid recursion +type BatchEntry struct { + // Types that are valid to be assigned to Proof: + // *BatchEntry_Exist + // *BatchEntry_Nonexist + Proof isBatchEntry_Proof `protobuf_oneof:"proof"` +} + +func (m *BatchEntry) Reset() { *m = BatchEntry{} } +func (m *BatchEntry) String() string { return proto.CompactTextString(m) } +func
(*BatchEntry) ProtoMessage() {} +func (*BatchEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{8} +} +func (m *BatchEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BatchEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BatchEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BatchEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchEntry.Merge(m, src) +} +func (m *BatchEntry) XXX_Size() int { + return m.Size() +} +func (m *BatchEntry) XXX_DiscardUnknown() { + xxx_messageInfo_BatchEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchEntry proto.InternalMessageInfo + +type isBatchEntry_Proof interface { + isBatchEntry_Proof() + MarshalTo([]byte) (int, error) + Size() int +} + +type BatchEntry_Exist struct { + Exist *ExistenceProof `protobuf:"bytes,1,opt,name=exist,proto3,oneof" json:"exist,omitempty"` +} +type BatchEntry_Nonexist struct { + Nonexist *NonExistenceProof `protobuf:"bytes,2,opt,name=nonexist,proto3,oneof" json:"nonexist,omitempty"` +} + +func (*BatchEntry_Exist) isBatchEntry_Proof() {} +func (*BatchEntry_Nonexist) isBatchEntry_Proof() {} + +func (m *BatchEntry) GetProof() isBatchEntry_Proof { + if m != nil { + return m.Proof + } + return nil +} + +func (m *BatchEntry) GetExist() *ExistenceProof { + if x, ok := m.GetProof().(*BatchEntry_Exist); ok { + return x.Exist + } + return nil +} + +func (m *BatchEntry) GetNonexist() *NonExistenceProof { + if x, ok := m.GetProof().(*BatchEntry_Nonexist); ok { + return x.Nonexist + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*BatchEntry) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*BatchEntry_Exist)(nil), + (*BatchEntry_Nonexist)(nil), + } +} + +type CompressedBatchProof struct { + Entries []*CompressedBatchEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + LookupInners []*InnerOp `protobuf:"bytes,2,rep,name=lookup_inners,json=lookupInners,proto3" json:"lookup_inners,omitempty"` +} + +func (m *CompressedBatchProof) Reset() { *m = CompressedBatchProof{} } +func (m *CompressedBatchProof) String() string { return proto.CompactTextString(m) } +func (*CompressedBatchProof) ProtoMessage() {} +func (*CompressedBatchProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{9} +} +func (m *CompressedBatchProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompressedBatchProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompressedBatchProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompressedBatchProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompressedBatchProof.Merge(m, src) +} +func (m *CompressedBatchProof) XXX_Size() int { + return m.Size() +} +func (m *CompressedBatchProof) XXX_DiscardUnknown() { + xxx_messageInfo_CompressedBatchProof.DiscardUnknown(m) +} + +var xxx_messageInfo_CompressedBatchProof proto.InternalMessageInfo + +func (m *CompressedBatchProof) GetEntries() []*CompressedBatchEntry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *CompressedBatchProof) GetLookupInners() []*InnerOp { + if m != nil { + return m.LookupInners + } + return nil +} + +// Use BatchEntry not CommitmentProof, to avoid recursion +type CompressedBatchEntry struct { + // Types that are valid to be assigned to Proof: + // *CompressedBatchEntry_Exist + // *CompressedBatchEntry_Nonexist + Proof isCompressedBatchEntry_Proof `protobuf_oneof:"proof"` +} + +func (m *CompressedBatchEntry) Reset() { *m = CompressedBatchEntry{} } +func (m *CompressedBatchEntry) String() string { return proto.CompactTextString(m) } +func (*CompressedBatchEntry) ProtoMessage() {} +func (*CompressedBatchEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{10} +} +func (m *CompressedBatchEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompressedBatchEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompressedBatchEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompressedBatchEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompressedBatchEntry.Merge(m, src) +} +func (m *CompressedBatchEntry) XXX_Size() int { + return m.Size() +} +func (m *CompressedBatchEntry) XXX_DiscardUnknown() { + xxx_messageInfo_CompressedBatchEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_CompressedBatchEntry proto.InternalMessageInfo + +type isCompressedBatchEntry_Proof interface { + isCompressedBatchEntry_Proof() + MarshalTo([]byte) (int, error) + Size() int +} + +type CompressedBatchEntry_Exist struct { + Exist *CompressedExistenceProof `protobuf:"bytes,1,opt,name=exist,proto3,oneof" json:"exist,omitempty"` +} +type CompressedBatchEntry_Nonexist struct { + Nonexist *CompressedNonExistenceProof 
`protobuf:"bytes,2,opt,name=nonexist,proto3,oneof" json:"nonexist,omitempty"` +} + +func (*CompressedBatchEntry_Exist) isCompressedBatchEntry_Proof() {} +func (*CompressedBatchEntry_Nonexist) isCompressedBatchEntry_Proof() {} + +func (m *CompressedBatchEntry) GetProof() isCompressedBatchEntry_Proof { + if m != nil { + return m.Proof + } + return nil +} + +func (m *CompressedBatchEntry) GetExist() *CompressedExistenceProof { + if x, ok := m.GetProof().(*CompressedBatchEntry_Exist); ok { + return x.Exist + } + return nil +} + +func (m *CompressedBatchEntry) GetNonexist() *CompressedNonExistenceProof { + if x, ok := m.GetProof().(*CompressedBatchEntry_Nonexist); ok { + return x.Nonexist + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*CompressedBatchEntry) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*CompressedBatchEntry_Exist)(nil), + (*CompressedBatchEntry_Nonexist)(nil), + } +} + +type CompressedExistenceProof struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Leaf *LeafOp `protobuf:"bytes,3,opt,name=leaf,proto3" json:"leaf,omitempty"` + // these are indexes into the lookup_inners table in CompressedBatchProof + Path []int32 `protobuf:"varint,4,rep,packed,name=path,proto3" json:"path,omitempty"` +} + +func (m *CompressedExistenceProof) Reset() { *m = CompressedExistenceProof{} } +func (m *CompressedExistenceProof) String() string { return proto.CompactTextString(m) } +func (*CompressedExistenceProof) ProtoMessage() {} +func (*CompressedExistenceProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{11} +} +func (m *CompressedExistenceProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompressedExistenceProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompressedExistenceProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompressedExistenceProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompressedExistenceProof.Merge(m, src) +} +func (m *CompressedExistenceProof) XXX_Size() int { + return m.Size() +} +func (m *CompressedExistenceProof) XXX_DiscardUnknown() { + xxx_messageInfo_CompressedExistenceProof.DiscardUnknown(m) +} + +var xxx_messageInfo_CompressedExistenceProof proto.InternalMessageInfo + +func (m *CompressedExistenceProof) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompressedExistenceProof) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *CompressedExistenceProof) GetLeaf() *LeafOp { + if m != nil { + return m.Leaf + } + return nil +} + +func (m *CompressedExistenceProof) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +type CompressedNonExistenceProof struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Left *CompressedExistenceProof `protobuf:"bytes,2,opt,name=left,proto3" json:"left,omitempty"` + Right *CompressedExistenceProof `protobuf:"bytes,3,opt,name=right,proto3" json:"right,omitempty"` +} + +func (m *CompressedNonExistenceProof) Reset() { *m = CompressedNonExistenceProof{} } +func (m *CompressedNonExistenceProof) String() string { return proto.CompactTextString(m) } +func (*CompressedNonExistenceProof) 
ProtoMessage() {} +func (*CompressedNonExistenceProof) Descriptor() ([]byte, []int) { + return fileDescriptor_855156e15e7b8e99, []int{12} +} +func (m *CompressedNonExistenceProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompressedNonExistenceProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompressedNonExistenceProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompressedNonExistenceProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompressedNonExistenceProof.Merge(m, src) +} +func (m *CompressedNonExistenceProof) XXX_Size() int { + return m.Size() +} +func (m *CompressedNonExistenceProof) XXX_DiscardUnknown() { + xxx_messageInfo_CompressedNonExistenceProof.DiscardUnknown(m) +} + +var xxx_messageInfo_CompressedNonExistenceProof proto.InternalMessageInfo + +func (m *CompressedNonExistenceProof) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompressedNonExistenceProof) GetLeft() *CompressedExistenceProof { + if m != nil { + return m.Left + } + return nil +} + +func (m *CompressedNonExistenceProof) GetRight() *CompressedExistenceProof { + if m != nil { + return m.Right + } + return nil +} + +func init() { + proto.RegisterEnum("ics23.HashOp", HashOp_name, HashOp_value) + proto.RegisterEnum("ics23.LengthOp", LengthOp_name, LengthOp_value) + proto.RegisterType((*ExistenceProof)(nil), "ics23.ExistenceProof") + proto.RegisterType((*NonExistenceProof)(nil), "ics23.NonExistenceProof") + proto.RegisterType((*CommitmentProof)(nil), "ics23.CommitmentProof") + proto.RegisterType((*LeafOp)(nil), "ics23.LeafOp") + proto.RegisterType((*InnerOp)(nil), "ics23.InnerOp") + proto.RegisterType((*ProofSpec)(nil), "ics23.ProofSpec") + proto.RegisterType((*InnerSpec)(nil), "ics23.InnerSpec") + proto.RegisterType((*BatchProof)(nil), "ics23.BatchProof") + proto.RegisterType((*BatchEntry)(nil), "ics23.BatchEntry") + proto.RegisterType((*CompressedBatchProof)(nil), "ics23.CompressedBatchProof") + proto.RegisterType((*CompressedBatchEntry)(nil), "ics23.CompressedBatchEntry") + proto.RegisterType((*CompressedExistenceProof)(nil), "ics23.CompressedExistenceProof") + proto.RegisterType((*CompressedNonExistenceProof)(nil), "ics23.CompressedNonExistenceProof") +} + +func init() { proto.RegisterFile("proofs.proto", fileDescriptor_855156e15e7b8e99) } + +var fileDescriptor_855156e15e7b8e99 = []byte{ + // 940 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0xe3, 0x54, + 0x14, 0xce, 0x4d, 0x62, 0x27, 0x39, 0x69, 0x53, 0xf7, 0xaa, 0x20, 0x4b, 0x15, 0x69, 0xf1, 0x86, + 0x4e, 0x47, 0x14, 0x26, 0x99, 0x06, 0xb1, 0x40, 0xa2, 0x49, 0x3d, 0xc4, 0x6a, 0x69, 0xc2, 0x4d, + 0x18, 0x0d, 0x12, 0x92, 0xe5, 0x49, 0x6f, 0x26, 0xd6, 0x24, 0xb6, 0x65, 0xbb, 0x28, 0x19, 0x81, + 0x90, 0xf8, 0x05, 0xac, 0x61, 0xc7, 0x96, 0x3f, 0xc2, 0xb2, 0x4b, 0x96, 0xa8, 0x5d, 0xb0, 0xe0, + 0x4f, 0xa0, 0xfb, 0x88, 0x9b, 0x27, 0xed, 0x02, 0xcd, 0xee, 0x9e, 0xef, 0x7c, 0xe7, 0x71, 0xcf, + 0xc3, 0xd7, 0xb0, 0x11, 0x84, 0xbe, 0xdf, 0x8f, 0x8e, 0x82, 0xd0, 0x8f, 0x7d, 0xac, 0xb8, 0xbd, + 0xa8, 0x52, 0x35, 0x7e, 0x84, 0x92, 0x39, 0x76, 0xa3, 0x98, 0x7a, 0x3d, 0xda, 0x66, 0x7a, 0xac, + 0x41, 0xe6, 0x35, 0x9d, 0xe8, 0x68, 0x1f, 0x1d, 0x6c, 0x10, 0x76, 0xc4, 0x3b, 0xa0, 0x7c, 0xe7, + 0x0c, 0xaf, 0xa8, 0x9e, 0xe6, 0x98, 0x10, 0xf0, 0xfb, 0x90, 
0x1d, 0x52, 0xa7, 0xaf, 0x67, 0xf6, + 0xd1, 0x41, 0xb1, 0xb2, 0x79, 0xc4, 0xfd, 0x1d, 0x9d, 0x53, 0xa7, 0xdf, 0x0a, 0x08, 0x57, 0x61, + 0x03, 0xb2, 0x81, 0x13, 0x0f, 0xf4, 0xec, 0x7e, 0xe6, 0xa0, 0x58, 0x29, 0x49, 0x8a, 0xe5, 0x79, + 0x34, 0x64, 0x1c, 0xa6, 0x33, 0x7e, 0x80, 0xed, 0x0b, 0xdf, 0xbb, 0x37, 0x87, 0x47, 0x2c, 0x5a, + 0x3f, 0xe6, 0x29, 0x14, 0x2b, 0xef, 0x48, 0x57, 0xf3, 0x66, 0x84, 0x53, 0xf0, 0x63, 0x50, 0x42, + 0xf7, 0xd5, 0x20, 0x96, 0x99, 0xad, 0xe1, 0x0a, 0x8e, 0xf1, 0x0f, 0x82, 0xad, 0x86, 0x3f, 0x1a, + 0xb9, 0xf1, 0x88, 0x7a, 0xb1, 0x88, 0xfe, 0x21, 0x28, 0x94, 0x91, 0x79, 0xfc, 0x75, 0x0e, 0x9a, + 0x29, 0x22, 0x58, 0xb8, 0x06, 0x79, 0xcf, 0xf7, 0x84, 0x85, 0x48, 0x4f, 0x97, 0x16, 0x4b, 0x17, + 0x6b, 0xa6, 0x48, 0xc2, 0xc5, 0x8f, 0x40, 0x79, 0xe9, 0xc4, 0xbd, 0x81, 0xcc, 0x73, 0x5b, 0x1a, + 0xd5, 0x19, 0x96, 0x84, 0xe0, 0x0c, 0xfc, 0x19, 0x40, 0xcf, 0x1f, 0x05, 0x21, 0x8d, 0x22, 0x7a, + 0xa9, 0x67, 0x39, 0x7f, 0x57, 0xf2, 0x1b, 0x89, 0x62, 0xce, 0x72, 0xc6, 0xa0, 0x9e, 0x03, 0x85, + 0xf7, 0xde, 0xb8, 0x46, 0xa0, 0x8a, 0x0e, 0xb1, 0xf6, 0x0d, 0x9c, 0x68, 0xc0, 0xef, 0x58, 0x4a, + 0xda, 0xd7, 0x74, 0xa2, 0x01, 0x6b, 0x0d, 0x53, 0xe1, 0x23, 0x28, 0x06, 0x21, 0x65, 0x47, 0x9b, + 0x75, 0x23, 0xbd, 0x8a, 0x09, 0x92, 0x71, 0x46, 0x27, 0xb8, 0x02, 0x9b, 0x53, 0xbe, 0x98, 0x97, + 0xcc, 0x2a, 0x8b, 0x0d, 0xc9, 0x79, 0xce, 0xa7, 0xe8, 0x03, 0x50, 0x87, 0xd4, 0x7b, 0xc5, 0x87, + 0x84, 0x91, 0xb7, 0x92, 0x39, 0x62, 0x60, 0x2b, 0x20, 0x52, 0x8d, 0xdf, 0x05, 0x35, 0x08, 0x69, + 0xdf, 0x1d, 0xeb, 0x0a, 0x9f, 0x0a, 0x29, 0x19, 0xdf, 0x42, 0x4e, 0x0e, 0xd4, 0x43, 0xae, 0x74, + 0xe7, 0x25, 0x3d, 0xeb, 0x85, 0xe1, 0xd1, 0x55, 0x9f, 0xe1, 0x19, 0x81, 0x0b, 0xc9, 0xf8, 0x0d, + 0x41, 0x81, 0x57, 0xb4, 0x13, 0xd0, 0x1e, 0x3e, 0x84, 0x02, 0x9b, 0x6b, 0x3b, 0x0a, 0x68, 0x4f, + 0x0e, 0xc7, 0xc2, 0xdc, 0xe7, 0x99, 0x9e, 0x73, 0x3f, 0x02, 0x70, 0x59, 0x5e, 0x82, 0x2c, 0xe6, + 0x42, 0x9b, 0xdd, 0x00, 0xc6, 0x22, 0x05, 0x77, 0x7a, 0xc4, 0xbb, 0x50, 0x18, 0x39, 0x63, 0xfb, + 0x92, 0x06, 0xb1, 0x18, 0x09, 0x85, 0xe4, 0x47, 0xce, 0xf8, 0x94, 0xc9, 0x5c, 0xe9, 0x7a, 0x52, + 0x99, 0x95, 0x4a, 0xd7, 0xe3, 0x4a, 0xe3, 0x6f, 0x04, 0x85, 0xc4, 0x25, 0xde, 0x83, 0x62, 0x6f, + 0xe0, 0x0e, 0x2f, 0x6d, 0x3f, 0xbc, 0xa4, 0xa1, 0x8e, 0xf6, 0x33, 0x07, 0x0a, 0x01, 0x0e, 0xb5, + 0x18, 0x82, 0xdf, 0x03, 0x21, 0xd9, 0x91, 0xfb, 0x46, 0xec, 0xb4, 0x42, 0x0a, 0x1c, 0xe9, 0xb8, + 0x6f, 0x28, 0x3e, 0x84, 0x6d, 0x16, 0x4a, 0x14, 0xc6, 0x96, 0xcd, 0x11, 0xf9, 0x6c, 0x8d, 0x5c, + 0xaf, 0xcd, 0x71, 0xd1, 0x1e, 0xce, 0x75, 0xc6, 0x0b, 0xdc, 0xac, 0xe4, 0x3a, 0xe3, 0x39, 0xee, + 0x1e, 0x14, 0xe9, 0x28, 0x88, 0x27, 0x36, 0x0f, 0x25, 0xbb, 0x08, 0x1c, 0x6a, 0x30, 0x24, 0x69, + 0x9f, 0xba, 0xb6, 0x7d, 0xc6, 0xa7, 0x00, 0x77, 0x43, 0x8e, 0x1f, 0x43, 0x8e, 0x7a, 0x71, 0xe8, + 0xd2, 0x88, 0xdf, 0x72, 0x61, 0x85, 0x4c, 0x2f, 0x0e, 0x27, 0x64, 0xca, 0x30, 0xbe, 0x97, 0xa6, + 0x1c, 0x7e, 0x4b, 0x2b, 0x7e, 0xb7, 0x78, 0x3f, 0x21, 0xd8, 0x59, 0xb5, 0xa8, 0xf8, 0x78, 0xf1, + 0x0e, 0x6b, 0xd6, 0x7a, 0xfe, 0x36, 0xb8, 0x0a, 0x9b, 0x43, 0xdf, 0x7f, 0x7d, 0x15, 0xd8, 0x7c, + 0x80, 0x22, 0x3d, 0xbd, 0xf2, 0x13, 0xbb, 0x21, 0x48, 0x5c, 0x8c, 0x8c, 0x5f, 0x96, 0x93, 0x10, + 0xd5, 0xf8, 0x64, 0xbe, 0x1a, 0x7b, 0x4b, 0x29, 0xac, 0xab, 0xcb, 0xe7, 0x4b, 0x75, 0x31, 0x96, + 0x6c, 0x1f, 0x58, 0xa1, 0x09, 0xe8, 0xeb, 0xe2, 0xfd, 0x9f, 0x4f, 0x12, 0x9e, 0x79, 0x92, 0x14, + 0xf9, 0x04, 0xfd, 0x8a, 0x60, 0xf7, 0x3f, 0xf2, 0x5d, 0x11, 0xbe, 0x3a, 0xf7, 0x1a, 0xdd, 0x57, + 0x2f, 0xf9, 0x2e, 0x1d, 0xcf, 0xbf, 0x4b, 0xf7, 0x5a, 0x09, 0xf6, 0x21, 0x05, 0x55, 
0xec, 0x00, + 0x2e, 0x42, 0xee, 0xa2, 0x65, 0x37, 0x4f, 0x3a, 0x4d, 0x2d, 0x85, 0x01, 0xd4, 0x4e, 0xf3, 0xa4, + 0x72, 0x5c, 0xd3, 0x90, 0x3c, 0x1f, 0x3f, 0xa9, 0x68, 0x69, 0x76, 0x3e, 0x33, 0x1b, 0x8d, 0x93, + 0x33, 0x2d, 0x83, 0x37, 0xa1, 0x40, 0xac, 0xb6, 0xf9, 0xe5, 0xe9, 0x93, 0xda, 0xc7, 0x5a, 0x96, + 0xd9, 0xd7, 0xad, 0x6e, 0xa3, 0x65, 0x5d, 0x68, 0x0a, 0x2e, 0x01, 0x08, 0x1b, 0x9b, 0xf9, 0x50, + 0x0f, 0x7f, 0x47, 0x90, 0x9f, 0x7e, 0x74, 0x99, 0xe1, 0x45, 0xcb, 0x6e, 0x13, 0xf3, 0x99, 0xf5, + 0x42, 0x4b, 0x31, 0xf1, 0xf9, 0x09, 0xb1, 0xdb, 0xa4, 0xd5, 0x6d, 0x69, 0x88, 0xf9, 0x61, 0x22, + 0x39, 0x6f, 0x6b, 0x69, 0xbc, 0x05, 0xc5, 0x67, 0xd6, 0x0b, 0xf3, 0xb4, 0x5a, 0xb1, 0xeb, 0xd6, + 0x17, 0x5a, 0x06, 0x63, 0x28, 0x4d, 0x81, 0x73, 0xab, 0xdb, 0x3d, 0x37, 0xb5, 0x6c, 0x42, 0xaa, + 0x3d, 0xe5, 0x24, 0x25, 0x21, 0xd5, 0x9e, 0x4e, 0x49, 0x2a, 0xde, 0x01, 0x8d, 0x98, 0x5f, 0x7d, + 0x6d, 0x11, 0xd3, 0x66, 0xce, 0xbe, 0xe9, 0x9a, 0x1d, 0x2d, 0x37, 0x8b, 0x32, 0x6b, 0x8e, 0xe6, + 0xeb, 0xfa, 0x1f, 0x37, 0x65, 0x74, 0x7d, 0x53, 0x46, 0x7f, 0xdd, 0x94, 0xd1, 0xcf, 0xb7, 0xe5, + 0xd4, 0xf5, 0x6d, 0x39, 0xf5, 0xe7, 0x6d, 0x39, 0xf5, 0x52, 0xe5, 0xbf, 0x37, 0xd5, 0x7f, 0x03, + 0x00, 0x00, 0xff, 0xff, 0xd4, 0x7d, 0x87, 0x8f, 0xee, 0x08, 0x00, 0x00, +} + +func (m *ExistenceProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExistenceProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExistenceProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Path[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Leaf != nil { + { + size, err := m.Leaf.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NonExistenceProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonExistenceProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonExistenceProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Right != nil { + { + size, err := m.Right.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Left != nil { + { + size, err := m.Left.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = 
encodeVarintProofs(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommitmentProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitmentProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size := m.Proof.Size() + i -= size + if _, err := m.Proof.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *CommitmentProof_Exist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof_Exist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exist != nil { + { + size, err := m.Exist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *CommitmentProof_Nonexist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof_Nonexist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Nonexist != nil { + { + size, err := m.Nonexist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CommitmentProof_Batch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof_Batch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Batch != nil { + { + size, err := m.Batch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CommitmentProof_Compressed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitmentProof_Compressed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Compressed != nil { + { + size, err := m.Compressed.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *LeafOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeafOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeafOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x2a + } + if m.Length != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x20 + } + if m.PrehashValue != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.PrehashValue)) + i-- + dAtA[i] = 0x18 + } + if 
m.PrehashKey != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.PrehashKey)) + i-- + dAtA[i] = 0x10 + } + if m.Hash != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.Hash)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *InnerOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InnerOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InnerOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Suffix) > 0 { + i -= len(m.Suffix) + copy(dAtA[i:], m.Suffix) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Suffix))) + i-- + dAtA[i] = 0x1a + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x12 + } + if m.Hash != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.Hash)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProofSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProofSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MinDepth != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.MinDepth)) + i-- + dAtA[i] = 0x20 + } + if m.MaxDepth != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.MaxDepth)) + i-- + dAtA[i] = 0x18 + } + if m.InnerSpec != nil { + { + size, err := m.InnerSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.LeafSpec != nil { + { + size, err := m.LeafSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *InnerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InnerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InnerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Hash != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.Hash)) + i-- + dAtA[i] = 0x30 + } + if len(m.EmptyChild) > 0 { + i -= len(m.EmptyChild) + copy(dAtA[i:], m.EmptyChild) + i = encodeVarintProofs(dAtA, i, uint64(len(m.EmptyChild))) + i-- + dAtA[i] = 0x2a + } + if m.MaxPrefixLength != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.MaxPrefixLength)) + i-- + dAtA[i] = 0x20 + } + if m.MinPrefixLength != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.MinPrefixLength)) + i-- + dAtA[i] = 0x18 + } + if m.ChildSize != 0 { + i = encodeVarintProofs(dAtA, i, uint64(m.ChildSize)) + i-- + dAtA[i] = 0x10 + } + if len(m.ChildOrder) > 0 { + dAtA11 := make([]byte, len(m.ChildOrder)*10) + var j10 int + for _, num1 := range m.ChildOrder { + num := uint64(num1) + for num >= 1<<7 { + dAtA11[j10] = uint8(uint64(num)&0x7f | 
0x80) + num >>= 7 + j10++ + } + dAtA11[j10] = uint8(num) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintProofs(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BatchProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BatchProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BatchProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BatchEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BatchEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BatchEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size := m.Proof.Size() + i -= size + if _, err := m.Proof.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *BatchEntry_Exist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BatchEntry_Exist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exist != nil { + { + size, err := m.Exist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *BatchEntry_Nonexist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BatchEntry_Nonexist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Nonexist != nil { + { + size, err := m.Nonexist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CompressedBatchProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompressedBatchProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedBatchProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LookupInners) > 0 { + for iNdEx := len(m.LookupInners) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LookupInners[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CompressedBatchEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompressedBatchEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedBatchEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size := m.Proof.Size() + i -= size + if _, err := m.Proof.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *CompressedBatchEntry_Exist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedBatchEntry_Exist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exist != nil { + { + size, err := m.Exist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *CompressedBatchEntry_Nonexist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedBatchEntry_Nonexist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Nonexist != nil { + { + size, err := m.Nonexist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CompressedExistenceProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompressedExistenceProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedExistenceProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Path) > 0 { + dAtA17 := make([]byte, len(m.Path)*10) + var j16 int + for _, num1 := range m.Path { + num := uint64(num1) + for num >= 1<<7 { + dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j16++ + } + dAtA17[j16] = uint8(num) + j16++ + } + i -= j16 + copy(dAtA[i:], dAtA17[:j16]) + i = encodeVarintProofs(dAtA, i, uint64(j16)) + i-- + dAtA[i] = 0x22 + } + if m.Leaf != nil { + { + size, err := m.Leaf.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompressedNonExistenceProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompressedNonExistenceProof) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompressedNonExistenceProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Right != nil { + { + size, err := m.Right.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Left != nil { + { + size, err := m.Left.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProofs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProofs(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintProofs(dAtA []byte, offset int, v uint64) int { + offset -= sovProofs(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExistenceProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Leaf != nil { + l = m.Leaf.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.Size() + n += 1 + l + sovProofs(uint64(l)) + } + } + return n +} + +func (m *NonExistenceProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Left != nil { + l = m.Left.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if m.Right != nil { + l = m.Right.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} + +func (m *CommitmentProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proof != nil { + n += m.Proof.Size() + } + return n +} + +func (m *CommitmentProof_Exist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exist != nil { + l = m.Exist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CommitmentProof_Nonexist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Nonexist != nil { + l = m.Nonexist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CommitmentProof_Batch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Batch != nil { + l = m.Batch.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CommitmentProof_Compressed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Compressed != nil { + l = m.Compressed.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *LeafOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Hash != 0 { + n += 1 + sovProofs(uint64(m.Hash)) + } + if m.PrehashKey != 0 { + n += 1 + sovProofs(uint64(m.PrehashKey)) + } + if m.PrehashValue != 0 { + n += 1 + sovProofs(uint64(m.PrehashValue)) + } + if m.Length != 0 { + n += 1 + sovProofs(uint64(m.Length)) + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + return n +} + +func (m *InnerOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Hash != 0 { + n += 1 + sovProofs(uint64(m.Hash)) + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + l = len(m.Suffix) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + return n 
+} + +func (m *ProofSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LeafSpec != nil { + l = m.LeafSpec.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if m.InnerSpec != nil { + l = m.InnerSpec.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if m.MaxDepth != 0 { + n += 1 + sovProofs(uint64(m.MaxDepth)) + } + if m.MinDepth != 0 { + n += 1 + sovProofs(uint64(m.MinDepth)) + } + return n +} + +func (m *InnerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChildOrder) > 0 { + l = 0 + for _, e := range m.ChildOrder { + l += sovProofs(uint64(e)) + } + n += 1 + sovProofs(uint64(l)) + l + } + if m.ChildSize != 0 { + n += 1 + sovProofs(uint64(m.ChildSize)) + } + if m.MinPrefixLength != 0 { + n += 1 + sovProofs(uint64(m.MinPrefixLength)) + } + if m.MaxPrefixLength != 0 { + n += 1 + sovProofs(uint64(m.MaxPrefixLength)) + } + l = len(m.EmptyChild) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovProofs(uint64(m.Hash)) + } + return n +} + +func (m *BatchProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovProofs(uint64(l)) + } + } + return n +} + +func (m *BatchEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proof != nil { + n += m.Proof.Size() + } + return n +} + +func (m *BatchEntry_Exist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exist != nil { + l = m.Exist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *BatchEntry_Nonexist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Nonexist != nil { + l = m.Nonexist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CompressedBatchProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovProofs(uint64(l)) + } + } + if len(m.LookupInners) > 0 { + for _, e := range m.LookupInners { + l = e.Size() + n += 1 + l + sovProofs(uint64(l)) + } + } + return n +} + +func (m *CompressedBatchEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proof != nil { + n += m.Proof.Size() + } + return n +} + +func (m *CompressedBatchEntry_Exist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exist != nil { + l = m.Exist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CompressedBatchEntry_Nonexist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Nonexist != nil { + l = m.Nonexist.Size() + n += 1 + l + sovProofs(uint64(l)) + } + return n +} +func (m *CompressedExistenceProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Leaf != nil { + l = m.Leaf.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if len(m.Path) > 0 { + l = 0 + for _, e := range m.Path { + l += sovProofs(uint64(e)) + } + n += 1 + sovProofs(uint64(l)) + l + } + return n +} + +func (m *CompressedNonExistenceProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProofs(uint64(l)) + } + if m.Left != nil { + l = m.Left.Size() + n += 1 + l + sovProofs(uint64(l)) + } + if m.Right != nil { + l = m.Right.Size() + n += 1 + l + 
sovProofs(uint64(l)) + } + return n +} + +func sovProofs(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozProofs(x uint64) (n int) { + return sovProofs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExistenceProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExistenceProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExistenceProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leaf == nil { + m.Leaf = &LeafOp{} + } + if err := m.Leaf.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path, &InnerOp{}) + if err := m.Path[len(m.Path)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonExistenceProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonExistenceProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonExistenceProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Left == nil { + m.Left = &ExistenceProof{} + } + if err := m.Left.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Right == nil { + m.Right = &ExistenceProof{} + } + if err := m.Right.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitmentProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitmentProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitmentProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CommitmentProof_Exist{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonexist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NonExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CommitmentProof_Nonexist{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Batch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BatchProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CommitmentProof_Batch{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compressed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &CompressedBatchProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CommitmentProof_Compressed{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeafOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeafOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeafOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrehashKey", wireType) + } + m.PrehashKey = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.PrehashKey |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrehashValue", wireType) + } + m.PrehashValue = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PrehashValue |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= LengthOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) + if m.Prefix == nil { + m.Prefix = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InnerOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InnerOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InnerOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Prefix == nil { + m.Prefix = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Suffix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Suffix = append(m.Suffix[:0], dAtA[iNdEx:postIndex]...) + if m.Suffix == nil { + m.Suffix = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProofSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeafSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeafSpec == nil { + m.LeafSpec = &LeafOp{} + } + if err := m.LeafSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InnerSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InnerSpec == nil { + m.InnerSpec = &InnerSpec{} + } + if err := m.InnerSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxDepth", wireType) + } + m.MaxDepth = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxDepth |= 
int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDepth", wireType) + } + m.MinDepth = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinDepth |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InnerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InnerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InnerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ChildOrder = append(m.ChildOrder, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.ChildOrder) == 0 { + m.ChildOrder = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ChildOrder = append(m.ChildOrder, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ChildOrder", wireType) + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChildSize", wireType) + } + m.ChildSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChildSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinPrefixLength", wireType) + } + m.MinPrefixLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinPrefixLength |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPrefixLength", wireType) + } + m.MaxPrefixLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPrefixLength |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmptyChild", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EmptyChild = append(m.EmptyChild[:0], dAtA[iNdEx:postIndex]...) + if m.EmptyChild == nil { + m.EmptyChild = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= HashOp(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BatchProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BatchProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BatchProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, &BatchEntry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BatchEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BatchEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BatchEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &BatchEntry_Exist{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonexist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NonExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &BatchEntry_Nonexist{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompressedBatchProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompressedBatchProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompressedBatchProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, &CompressedBatchEntry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LookupInners", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LookupInners = append(m.LookupInners, &InnerOp{}) + if err := m.LookupInners[len(m.LookupInners)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompressedBatchEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompressedBatchEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompressedBatchEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &CompressedExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CompressedBatchEntry_Exist{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonexist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + v := &CompressedNonExistenceProof{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Proof = &CompressedBatchEntry_Nonexist{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompressedExistenceProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompressedExistenceProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompressedExistenceProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leaf == nil { + m.Leaf = &LeafOp{} + } + if err := m.Leaf.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Path = append(m.Path, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Path) == 0 { + m.Path = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Path = append(m.Path, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompressedNonExistenceProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompressedNonExistenceProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompressedNonExistenceProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Left == nil { + m.Left = &CompressedExistenceProof{} + } + if err := m.Left.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProofs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProofs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProofs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Right == nil { + m.Right = &CompressedExistenceProof{} + } + if err := m.Right.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProofs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProofs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProofs(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProofs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProofs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProofs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthProofs + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupProofs + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthProofs + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + 
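All of the generated `Unmarshal` methods above, together with the `skipProofs` helper, are built around the same base-128 varint loop: each byte contributes its low seven bits, a set high bit means another byte follows, and decoding aborts on a 64-bit shift overflow or a truncated buffer. As a reading aid, here is a minimal standalone sketch of that loop; the `readVarint` helper and the sample bytes are illustrative only and are not part of the generated file:

```go
package main

import (
	"errors"
	"fmt"
)

// readVarint decodes one protobuf base-128 varint starting at index,
// mirroring the generated code's checks: overflow once the shift reaches
// 64 bits, and unexpected EOF if the buffer ends mid-varint.
func readVarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if index >= len(data) {
			return 0, 0, errors.New("proto: unexpected EOF")
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: this was the final byte
			return value, index, nil
		}
	}
}

func main() {
	// 0x28 is the tag for field 5 with wire type 0; 0xAC 0x02 encodes 300.
	data := []byte{0x28, 0xAC, 0x02}

	tag, next, _ := readVarint(data, 0)
	fmt.Println("field:", tag>>3, "wire type:", tag&0x7) // field: 5 wire type: 0

	value, _, _ := readVarint(data, next)
	fmt.Println("value:", value) // value: 300
}
```

The same `tag >> 3` / `tag & 0x7` split is what each generated `Unmarshal` performs to dispatch on `fieldNum` and validate `wireType` before decoding a field.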
+var ( + ErrInvalidLengthProofs = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProofs = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProofs = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/cosmos/staking/v1beta1/tx.proto b/proto/cosmos/staking/v1beta1/tx.proto index 7b05d89eea2..d074fe010e3 100644 --- a/proto/cosmos/staking/v1beta1/tx.proto +++ b/proto/cosmos/staking/v1beta1/tx.proto @@ -81,8 +81,7 @@ message MsgEditValidatorResponse {} // MsgDelegate defines a SDK message for performing a delegation of coins // from a delegator to a validator. message MsgDelegate { - option (gogoproto.equal) = false; - option (gogoproto.goproto_getters) = false; + option (gogoproto.equal) = false; string delegator_address = 1 [(gogoproto.moretags) = "yaml:\"delegator_address\""]; string validator_address = 2 [(gogoproto.moretags) = "yaml:\"validator_address\""]; @@ -95,8 +94,7 @@ message MsgDelegateResponse {} // MsgBeginRedelegate defines a SDK message for performing a redelegation // of coins from a delegator and source validator to a destination validator. message MsgBeginRedelegate { - option (gogoproto.equal) = false; - option (gogoproto.goproto_getters) = false; + option (gogoproto.equal) = false; string delegator_address = 1 [(gogoproto.moretags) = "yaml:\"delegator_address\""]; string validator_src_address = 2 [(gogoproto.moretags) = "yaml:\"validator_src_address\""]; @@ -112,8 +110,7 @@ message MsgBeginRedelegateResponse { // MsgUndelegate defines a SDK message for performing an undelegation from a // delegate and a validator. message MsgUndelegate { - option (gogoproto.equal) = false; - option (gogoproto.goproto_getters) = false; + option (gogoproto.equal) = false; string delegator_address = 1 [(gogoproto.moretags) = "yaml:\"delegator_address\""]; string validator_address = 2 [(gogoproto.moretags) = "yaml:\"validator_address\""]; diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 20b8d702006..f02f9cc81e0 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -38,6 +38,9 @@ go mod tidy buf protoc -I "proto" -I "third_party/proto" -I "testutil/testdata" --gocosmos_out=plugins=interfacetype+grpc,\ Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types:. ./testutil/testdata/*.proto +# generate baseapp test messages +(cd baseapp/testutil; buf generate) + # move proto files to the right places cp -r github.com/cosmos/cosmos-sdk/* ./ rm -rf github.com diff --git a/server/config/config.go b/server/config/config.go index 6925078158d..e7dc56ae833 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -216,7 +216,7 @@ func DefaultConfig() *Config { MinRetainBlocks: 0, IndexEvents: make([]string, 0), IAVLCacheSize: 781250, // 50 MB - IAVLDisableFastNode: false, + IAVLDisableFastNode: true, }, Telemetry: telemetry.Config{ Enabled: false, diff --git a/server/config/toml.go b/server/config/toml.go index 91dee199a77..523e97b9e36 100644 --- a/server/config/toml.go +++ b/server/config/toml.go @@ -75,7 +75,7 @@ index-events = {{ .BaseConfig.IndexEvents }} iavl-cache-size = {{ .BaseConfig.IAVLCacheSize }} # IAVLDisableFastNode enables or disables the fast node feature of IAVL. -# Default is false. +# Default is true. 
iavl-disable-fastnode = {{ .BaseConfig.IAVLDisableFastNode }} ############################################################################### diff --git a/server/mock/app.go b/server/mock/app.go index f31c6e948f4..60ea1ebc94a 100644 --- a/server/mock/app.go +++ b/server/mock/app.go @@ -1,18 +1,19 @@ package mock import ( + "context" "encoding/json" "errors" "fmt" "path/filepath" - "github.com/tendermint/tendermint/types" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" bam "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -36,7 +37,6 @@ func NewApp(rootDir string, logger log.Logger) (abci.Application, error) { baseApp.SetInitChainer(InitChainer(capKeyMainStore)) - // Set a Route. baseApp.Router().AddRoute(sdk.NewRoute("kvstore", KVStoreHandler(capKeyMainStore))) // Load latest version. @@ -128,3 +128,16 @@ func AppGenStateEmpty(_ *codec.LegacyAmino, _ types.GenesisDoc, _ []json.RawMess appState = json.RawMessage(``) return } + +// Manually write the handlers for this custom message +type MsgServer interface { + Test(ctx context.Context, msg *kvstoreTx) (*sdk.Result, error) +} + +type MsgServerImpl struct { + capKeyMainStore *storetypes.KVStoreKey +} + +func (m MsgServerImpl) Test(ctx context.Context, msg *kvstoreTx) (*sdk.Result, error) { + return KVStoreHandler(m.capKeyMainStore)(sdk.UnwrapSDKContext(ctx), msg) +} diff --git a/server/start.go b/server/start.go index 894df424218..ec536ba20ec 100644 --- a/server/start.go +++ b/server/start.go @@ -165,6 +165,8 @@ is performed. Note, when enabled, gRPC will also be automatically enabled. cmd.Flags().Uint64(FlagStateSyncSnapshotInterval, 0, "State sync snapshot interval") cmd.Flags().Uint32(FlagStateSyncSnapshotKeepRecent, 2, "State sync snapshot to keep") + cmd.Flags().Bool(FlagIAVLFastNode, true, "Enable fast node for IAVL tree") + // add support for all Tendermint-specific command line options tcmd.AddNodeFlags(cmd) return cmd diff --git a/simapp/helpers/test_helpers.go b/simapp/helpers/test_helpers.go index d3465ab7c38..1eb496e57a7 100644 --- a/simapp/helpers/test_helpers.go +++ b/simapp/helpers/test_helpers.go @@ -2,6 +2,7 @@ package helpers import ( "math/rand" + "time" "github.com/cosmos/cosmos-sdk/client" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" @@ -18,10 +19,12 @@ const ( ) // GenTx generates a signed mock transaction. 
-func GenTx(r *rand.Rand, gen client.TxConfig, msgs []sdk.Msg, feeAmt sdk.Coins, gas uint64, chainID string, accNums, accSeqs []uint64, priv ...cryptotypes.PrivKey) (sdk.Tx, error) { +func GenTx(gen client.TxConfig, msgs []sdk.Msg, feeAmt sdk.Coins, gas uint64, chainID string, accNums, accSeqs []uint64, priv ...cryptotypes.PrivKey) (sdk.Tx, error) { sigs := make([]signing.SignatureV2, len(priv)) // create a random length memo + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + memo := simulation.RandStringOfLength(r, simulation.RandIntBetween(r, 0, 100)) signMode := gen.SignModeHandler().DefaultMode() diff --git a/simapp/test_helpers.go b/simapp/test_helpers.go index ff641f2c689..1e1bd249c11 100644 --- a/simapp/test_helpers.go +++ b/simapp/test_helpers.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "math/rand" "strconv" "testing" "time" @@ -327,7 +326,6 @@ func SignCheckDeliver( chainID string, accNums, accSeqs []uint64, expSimPass, expPass bool, priv ...cryptotypes.PrivKey, ) (sdk.GasInfo, *sdk.Result, error) { tx, err := helpers.GenTx( - rand.New(rand.NewSource(time.Now().UnixNano())), txCfg, msgs, sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 0)}, @@ -378,7 +376,6 @@ func GenSequenceOfTxs(txGen client.TxConfig, msgs []sdk.Msg, accNums []uint64, i var err error for i := 0; i < numToGenerate; i++ { txs[i], err = helpers.GenTx( - rand.New(rand.NewSource(time.Now().UnixNano())), txGen, msgs, sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 0)}, diff --git a/store/cachekv/mergeiterator.go b/store/cachekv/mergeiterator.go index 25dfac80332..b8bbee6c5dd 100644 --- a/store/cachekv/mergeiterator.go +++ b/store/cachekv/mergeiterator.go @@ -33,7 +33,8 @@ func newCacheMergeIterator(parent, cache types.Iterator, ascending bool) *cacheM } // Domain implements Iterator. -// If the domains are different, returns the union. +// It returns the union of the iter.Parent domain, and the iter.Cache domain. +// If the domains are disjoint, this includes the domain in between them as well. func (iter *cacheMergeIterator) Domain() (start, end []byte) { startP, endP := iter.parent.Domain() startC, endC := iter.cache.Domain() diff --git a/store/cachekv/store.go b/store/cachekv/store.go index 7004073ea16..e596689877f 100644 --- a/store/cachekv/store.go +++ b/store/cachekv/store.go @@ -313,17 +313,7 @@ func (store *Store) dirtyItems(start, end []byte) { } sort.Strings(strL) - // Now find the values within the domain - // [start, end) - startIndex := findStartIndex(strL, startStr) - endIndex := findEndIndex(strL, endStr) - - if endIndex < 0 { - endIndex = len(strL) - 1 - } - if startIndex < 0 { - startIndex = 0 - } + startIndex, endIndex := findStartEndIndex(strL, startStr, endStr) // Since we spent cycles to sort the values, we should process and remove a reasonable amount // ensure start to end is at least minSortSize in size @@ -347,17 +337,23 @@ func (store *Store) dirtyItems(start, end []byte) { store.clearUnsortedCacheSubset(kvL, stateAlreadySorted) } -func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) { - n := len(store.unsortedCache) - if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map. - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - } else { // Otherwise, normally delete the unsorted keys from the map.
- for _, kv := range unsorted { - delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key)) - } +func findStartEndIndex(strL []string, startStr, endStr string) (int, int) { + // Now find the values within the domain + // [start, end) + startIndex := findStartIndex(strL, startStr) + endIndex := findEndIndex(strL, endStr) + + if endIndex < 0 { + endIndex = len(strL) - 1 + } + if startIndex < 0 { + startIndex = 0 } + return startIndex, endIndex +} + +func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) { + store.deleteKeysFromUnsortedCache(unsorted) if sortState == stateUnsorted { sort.Slice(unsorted, func(i, j int) bool { @@ -369,6 +365,7 @@ func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sort if item.Value == nil { // deleted element, tracked by store.deleted // setting arbitrary value + // TODO: Don't ignore this error. store.sortedCache.Set(item.Key, []byte{}) continue } @@ -379,11 +376,26 @@ func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sort } } +func (store *Store) deleteKeysFromUnsortedCache(unsorted []*kv.Pair) { + n := len(store.unsortedCache) + if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map. + for key := range store.unsortedCache { + delete(store.unsortedCache, key) + } + } else { // Otherwise, normally delete the unsorted keys from the map. + for _, kv := range unsorted { + delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key)) + } + } +} + //---------------------------------------- // etc // Only entrypoint to mutate store.cache. func (store *Store) setCacheValue(key, value []byte, deleted bool, dirty bool) { + types.AssertValidKey(key) + keyStr := conv.UnsafeBytesToStr(key) store.cache[keyStr] = &cValue{ value: value, @@ -395,7 +407,7 @@ func (store *Store) setCacheValue(key, value []byte, deleted bool, dirty bool) { delete(store.deleted, keyStr) } if dirty { - store.unsortedCache[conv.UnsafeBytesToStr(key)] = struct{}{} + store.unsortedCache[keyStr] = struct{}{} } } diff --git a/store/rootmulti/rollback_test.go b/store/rootmulti/rollback_test.go index 2de0b38a836..c2e199606e2 100644 --- a/store/rootmulti/rollback_test.go +++ b/store/rootmulti/rollback_test.go @@ -46,6 +46,7 @@ func SetupWithDB(isCheckTx bool, db dbm.DB) *simapp.SimApp { } func TestRollback(t *testing.T) { + t.Skip() db := dbm.NewMemDB() app := SetupWithDB(false, db) app.Commit() diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go index 642e21ce414..7e5d032850e 100644 --- a/store/rootmulti/store.go +++ b/store/rootmulti/store.go @@ -15,6 +15,7 @@ import ( "github.com/pkg/errors" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proto/tendermint/crypto" dbm "github.com/tendermint/tm-db" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" @@ -33,9 +34,11 @@ const ( latestVersionKey = "s/latest" pruneHeightsKey = "s/pruneheights" commitInfoKeyFmt = "s/%d" // s/ + + proofsPath = "proofs" ) -const iavlDisablefastNodeDefault = false +const iavlDisablefastNodeDefault = true // Store is composed of many CommitStores. Name contrasts with // cacheMultiStore which is used for branching other MultiStores. 
It implements @@ -238,9 +241,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { // If it was deleted, remove all data if upgrades.IsDeleted(key.Name()) { - if err := deleteKVStore(store.(types.KVStore)); err != nil { - return errors.Wrapf(err, "failed to delete store %s", key.Name()) - } + deleteKVStore(store.(types.KVStore)) } else if oldName := upgrades.RenamedFrom(key.Name()); oldName != "" { // handle renames specially // make an unregistered key to satisfy loadCommitStore params @@ -255,9 +256,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { } // move all data - if err := moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)); err != nil { - return errors.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name()) - } + moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)) } } @@ -282,7 +281,7 @@ func (rs *Store) getCommitID(infos map[string]types.StoreInfo, name string) type return info.CommitId } -func deleteKVStore(kv types.KVStore) error { +func deleteKVStore(kv types.KVStore) { // Note that we cannot write while iterating, so load all keys here, delete below var keys [][]byte itr := kv.Iterator(nil, nil) @@ -295,11 +294,10 @@ func deleteKVStore(kv types.KVStore) error { for _, k := range keys { kv.Delete(k) } - return nil } // we simulate move by a copy and delete -func moveKVStoreData(oldDB types.KVStore, newDB types.KVStore) error { +func moveKVStoreData(oldDB types.KVStore, newDB types.KVStore) { // we read from one and write to another itr := oldDB.Iterator(nil, nil) for itr.Valid() { @@ -309,7 +307,7 @@ func moveKVStoreData(oldDB types.KVStore, newDB types.KVStore) error { itr.Close() // then delete the old store - return deleteKVStore(oldDB) + deleteKVStore(oldDB) } // SetInterBlockCache sets the Store's internal inter-block (persistent) cache. @@ -579,22 +577,28 @@ func (rs *Store) GetStoreByName(name string) types.Store { // Query calls substore.Query with the same `req` where `req.Path` is // modified to remove the substore prefix. // I.e. `req.Path` here is `/<storeName>/<subpath>`, and trimmed to `/<subpath>` for the substore. -// TODO: add proof for `multistore -> substore`. +// Special case: if `req.Path` is `/proofs`, the commit hash is included +// as response value. In addition, proofs of every store are appended to the response for +// the requested height. func (rs *Store) Query(req abci.RequestQuery) abci.ResponseQuery { path := req.Path - storeName, subpath, err := parsePath(path) + firstPath, subpath, err := parsePath(path) if err != nil { return sdkerrors.QueryResult(err) } - store := rs.GetStoreByName(storeName) + if firstPath == proofsPath { + return rs.doProofsQuery(req) + } + + store := rs.GetStoreByName(firstPath) if store == nil { - return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "no such store: %s", storeName)) + return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "no such store: %s", firstPath)) } queryable, ok := store.(types.Queryable) if !ok { - return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "store %s (type %T) doesn't support queries", storeName, store)) + return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "store %s (type %T) doesn't support queries", firstPath, store)) } // trim the path and make the query @@ -624,13 +628,14 @@ } // Restore origin path and append proof op.
- res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeName)) + res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(firstPath)) return res } // SetInitialVersion sets the initial version of the IAVL tree. It is used when // starting a new chain at an arbitrary height. +// NOTE: this never errors. Can we fix the function signature? func (rs *Store) SetInitialVersion(version int64) error { rs.initialVersion = version @@ -980,6 +985,24 @@ func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore } } +func (rs *Store) doProofsQuery(req abci.RequestQuery) abci.ResponseQuery { + commitInfo, err := getCommitInfo(rs.db, req.Height) + if err != nil { + return sdkerrors.QueryResult(err) + } + res := abci.ResponseQuery{ + Height: req.Height, + Key: []byte(proofsPath), + Value: commitInfo.CommitID().Hash, + ProofOps: &crypto.ProofOps{Ops: make([]crypto.ProofOp, 0, len(commitInfo.StoreInfos))}, + } + + for _, storeInfo := range commitInfo.StoreInfos { + res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeInfo.Name)) + } + return res +} + // Gets commitInfo from disk. func getCommitInfo(db dbm.DB, ver int64) (*types.CommitInfo, error) { cInfoKey := fmt.Sprintf(commitInfoKeyFmt, ver) diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go index 26f4e8a36cc..fd706dd3c98 100644 --- a/store/rootmulti/store_test.go +++ b/store/rootmulti/store_test.go @@ -410,7 +410,7 @@ func TestMultiStoreQuery(t *testing.T) { k2, v2 := []byte("water"), []byte("flows") // v3 := []byte("is cold") - cid := multi.Commit() + cid1 := multi.Commit() // Make sure we can get by name. garbage := multi.GetStoreByName("bad-name") @@ -425,8 +425,8 @@ store2.Set(k2, v2) // Commit the multistore. - cid = multi.Commit() - ver := cid.Version + cid2 := multi.Commit() + ver := cid2.Version // Reload multistore from database multi = newMultiStoreWithMounts(db, types.PruneNothing) @@ -468,6 +468,26 @@ func TestMultiStoreQuery(t *testing.T) { qres = multi.Query(query) require.EqualValues(t, 0, qres.Code) require.Equal(t, v2, qres.Value) + + // Test proofs latest height + query.Path = fmt.Sprintf("/%s", proofsPath) + qres = multi.Query(query) + require.EqualValues(t, 0, qres.Code) + require.NotNil(t, qres.ProofOps) + require.Equal(t, []byte(proofsPath), qres.Key) + require.Equal(t, cid2.Hash, qres.Value) + require.Equal(t, cid2.Version, qres.Height) + require.Equal(t, 3, len(qres.ProofOps.Ops)) // 3 mounted stores + + // Test proofs second latest height + query.Height = query.Height - 1 + qres = multi.Query(query) + require.EqualValues(t, 0, qres.Code) + require.NotNil(t, qres.ProofOps) + require.Equal(t, []byte(proofsPath), qres.Key) + require.Equal(t, cid1.Hash, qres.Value) + require.Equal(t, cid1.Version, qres.Height) + require.Equal(t, 3, len(qres.ProofOps.Ops)) // 3 mounted stores } func TestMultiStore_Pruning(t *testing.T) { diff --git a/store/types/proof.go b/store/types/proof.go index db8f673f46c..ac0897ac10b 100644 --- a/store/types/proof.go +++ b/store/types/proof.go @@ -79,15 +79,15 @@ func (op CommitmentOp) GetKey() []byte { return op.Key } -// Run takes in a list of arguments and attempts to run the proof op against these arguments +// Run takes in a list of arguments and attempts to run the proof op against these arguments. // Returns the root wrapped in [][]byte if the proof op succeeds with given args. If not, // it will return an error.
// // CommitmentOp will accept args of length 1 or length 0 // If length 1 args is passed in, then CommitmentOp will attempt to prove the existence of the key -// with the value provided by args[0] using the embedded CommitmentProof and return the CommitmentRoot of the proof +// with the value provided by args[0] using the embedded CommitmentProof and return the CommitmentRoot of the proof. // If length 0 args is passed in, then CommitmentOp will attempt to prove the absence of the key -// in the CommitmentOp and return the CommitmentRoot of the proof +// in the CommitmentOp and return the CommitmentRoot of the proof. func (op CommitmentOp) Run(args [][]byte) ([][]byte, error) { // calculate root from proof root, err := op.Proof.Calculate() diff --git a/x/auth/tx/service.go b/x/auth/tx/service.go index 4d8e3b5e16b..6c2a9c364e5 100644 --- a/x/auth/tx/service.go +++ b/x/auth/tx/service.go @@ -119,7 +119,7 @@ func (s txServer) Simulate(ctx context.Context, req *txtypes.SimulateRequest) (* gasInfo, result, err := s.simulate(txBytes) if err != nil { - return nil, err + return nil, status.Errorf(codes.Unknown, "%v With gas wanted: '%d' and gas used: '%d' ", err, gasInfo.GasWanted, gasInfo.GasUsed) } return &txtypes.SimulateResponse{ diff --git a/x/authz/client/rest/grpc_query_test.go b/x/authz/client/rest/grpc_query_test.go index 5ab0d1de329..8c94031fc15 100644 --- a/x/authz/client/rest/grpc_query_test.go +++ b/x/authz/client/rest/grpc_query_test.go @@ -5,7 +5,6 @@ package rest_test import ( "fmt" - "testing" "time" "github.com/stretchr/testify/suite" @@ -257,7 +256,7 @@ func (s *IntegrationTestSuite) TestQueryGranterGrantsGRPC() { }, { "no authorizations found", - fmt.Sprintf("%s/cosmos/authz/v1beta1/grants/granter/%s", val.APIAddress, grantee.String()), + fmt.Sprintf("%s/cosmos/authz/v1beta1/grants/granter/%s", val.APIAddress, string(grantee)), false, "", 0, @@ -316,7 +315,7 @@ func (s *IntegrationTestSuite) TestQueryGranteeGrantsGRPC() { }, { "valid query", - fmt.Sprintf("%s/cosmos/authz/v1beta1/grants/grantee/%s", val.APIAddress, grantee.String()), + fmt.Sprintf("%s/cosmos/authz/v1beta1/grants/grantee/%s", val.APIAddress, string(grantee)), false, "", 1, diff --git a/x/authz/simulation/operations.go b/x/authz/simulation/operations.go index 44f92a8e87a..3d57041fed6 100644 --- a/x/authz/simulation/operations.go +++ b/x/authz/simulation/operations.go @@ -117,7 +117,6 @@ func SimulateMsgGrant(ak authz.AccountKeeper, bk authz.BankKeeper, _ keeper.Keep } txCfg := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txCfg, []sdk.Msg{msg}, fees, @@ -184,7 +183,6 @@ func SimulateMsgRevoke(ak authz.AccountKeeper, bk authz.BankKeeper, k keeper.Kee txCfg := simappparams.MakeTestEncodingConfig().TxConfig account := ak.GetAccount(ctx, granterAddr) tx, err := helpers.GenTx( - r, txCfg, []sdk.Msg{&msg}, fees, @@ -243,10 +241,6 @@ func SimulateMsgExec(ak authz.AccountKeeper, bk authz.BankKeeper, k keeper.Keepe granterspendableCoins := bk.SpendableCoins(ctx, granterAddr) coins := simtypes.RandSubsetCoins(r, granterspendableCoins) - // if coins slice is empty, we can not create valid banktype.MsgSend - if len(coins) == 0 { - return simtypes.NoOpMsg(authz.ModuleName, TypeMsgExec, "empty coins slice"), nil, nil - } // Check send_enabled status of each sent coin denom if err := bk.IsSendEnabledCoins(ctx, coins...); err != nil { return simtypes.NoOpMsg(authz.ModuleName, TypeMsgExec, err.Error()), nil, nil @@ -279,7 +273,6 @@ func SimulateMsgExec(ak authz.AccountKeeper, bk authz.BankKeeper, 
k keeper.Keepe txCfg := simappparams.MakeTestEncodingConfig().TxConfig granteeAcc := ak.GetAccount(ctx, granteeAddr) tx, err := helpers.GenTx( - r, txCfg, []sdk.Msg{&msgExec}, fees, diff --git a/x/bank/client/cli/tx.go b/x/bank/client/cli/tx.go index 4fee9ffd1e8..6d2f51d486c 100644 --- a/x/bank/client/cli/tx.go +++ b/x/bank/client/cli/tx.go @@ -38,17 +38,20 @@ ignored as it is implied from [from_key_or_address].`, if err != nil { return err } - toAddr, err := sdk.AccAddressFromBech32(args[1]) - if err != nil { - return err - } coins, err := sdk.ParseCoinsNormalized(args[2]) if err != nil { return err } - msg := types.NewMsgSend(clientCtx.GetFromAddress(), toAddr, coins) + msg := &types.MsgSend{ + FromAddress: clientCtx.GetFromAddress().String(), + ToAddress: args[1], + Amount: coins, + } + if err := msg.ValidateBasic(); err != nil { + return err + } return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) }, diff --git a/x/bank/module.go b/x/bank/module.go index 8035240795b..000c72070ab 100644 --- a/x/bank/module.go +++ b/x/bank/module.go @@ -38,6 +38,10 @@ type AppModuleBasic struct { cdc codec.Codec } +func NewAppModuleBasic(cdc codec.Codec) AppModuleBasic { + return AppModuleBasic{cdc} +} + // Name returns the bank module's name. func (AppModuleBasic) Name() string { return types.ModuleName } diff --git a/x/bank/simulation/operations.go b/x/bank/simulation/operations.go index b354fbfe9df..aa12c0e0478 100644 --- a/x/bank/simulation/operations.go +++ b/x/bank/simulation/operations.go @@ -59,10 +59,6 @@ func SimulateMsgSend(ak types.AccountKeeper, bk keeper.Keeper) simtypes.Operatio accs []simtypes.Account, chainID string, ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { from, to, coins, skip := randomSendFields(r, ctx, accs, bk, ak) - // if coins slice is empty, we can not create valid types.MsgSend - if len(coins) == 0 { - return simtypes.NoOpMsg(types.ModuleName, types.TypeMsgSend, "empty coins slice"), nil, nil - } // Check send_enabled status of each coin denom if err := bk.IsSendEnabledCoins(ctx, coins...); err != nil { @@ -97,10 +93,6 @@ func SimulateMsgSendToModuleAccount(ak types.AccountKeeper, bk keeper.Keeper, mo spendable := bk.SpendableCoins(ctx, from.Address) coins := simtypes.RandSubsetCoins(r, spendable) - // if coins slice is empty, we can not create valid types.MsgSend - if len(coins) == 0 { - return simtypes.NoOpMsg(types.ModuleName, types.TypeMsgSend, "empty coins slice"), nil, nil - } // Check send_enabled status of each coin denom if err := bk.IsSendEnabledCoins(ctx, coins...); err != nil { @@ -145,7 +137,6 @@ func sendMsgSend( } txGen := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txGen, []sdk.Msg{msg}, fees, @@ -359,7 +350,6 @@ func sendMsgMultiSend( txGen := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txGen, []sdk.Msg{msg}, fees, diff --git a/x/distribution/keeper/allocation.go b/x/distribution/keeper/allocation.go index 273db1314eb..02be99db4e6 100644 --- a/x/distribution/keeper/allocation.go +++ b/x/distribution/keeper/allocation.go @@ -81,16 +81,22 @@ func (k Keeper) AllocateTokens( // calculate fraction allocated to validators communityTax := k.GetCommunityTax(ctx) voteMultiplier := sdk.OneDec().Sub(proposerMultiplier).Sub(communityTax) + feeMultiplier := feesCollected.MulDecTruncate(voteMultiplier) // allocate tokens proportionally to voting power - // TODO consider parallelizing later, ref https://github.com/cosmos/cosmos-sdk/pull/3099#discussion_r246276376 + // + // 
TODO: Consider parallelizing later + // + Ref: https://github.com/cosmos/cosmos-sdk/pull/3099#discussion_r246276376 for _, vote := range bondedVotes { validator := k.stakingKeeper.ValidatorByConsAddr(ctx, vote.Validator.Address) - // TODO consider microslashing for missing votes. - // ref https://github.com/cosmos/cosmos-sdk/issues/2525#issuecomment-430838701 + // TODO: Consider micro-slashing for missing votes. + // + // Ref: https://github.com/cosmos/cosmos-sdk/issues/2525#issuecomment-430838701 powerFraction := sdk.NewDec(vote.Validator.Power).QuoTruncate(sdk.NewDec(totalPreviousPower)) - reward := feesCollected.MulDecTruncate(voteMultiplier).MulDecTruncate(powerFraction) + reward := feeMultiplier.MulDecTruncate(powerFraction) + k.AllocateTokensToValidator(ctx, validator, reward) remaining = remaining.Sub(reward) } @@ -100,7 +106,8 @@ func (k Keeper) AllocateTokens( k.SetFeePool(ctx, feePool) } -// AllocateTokensToValidator allocate tokens to a particular validator, splitting according to commission +// AllocateTokensToValidator allocates tokens to a particular validator, +// splitting according to commission. func (k Keeper) AllocateTokensToValidator(ctx sdk.Context, val stakingtypes.ValidatorI, tokens sdk.DecCoins) { // split tokens between validator and delegators according to commission commission := tokens.MulDec(val.GetCommission()) @@ -131,6 +138,7 @@ func (k Keeper) AllocateTokensToValidator(ctx sdk.Context, val stakingtypes.Vali sdk.NewAttribute(types.AttributeKeyValidator, val.GetOperator().String()), ), ) + outstanding := k.GetValidatorOutstandingRewards(ctx, val.GetOperator()) outstanding.Rewards = outstanding.Rewards.Add(tokens...) k.SetValidatorOutstandingRewards(ctx, val.GetOperator(), outstanding) diff --git a/x/distribution/keeper/delegation_test.go b/x/distribution/keeper/delegation_test.go index 8b4219e2802..020bb88c08d 100644 --- a/x/distribution/keeper/delegation_test.go +++ b/x/distribution/keeper/delegation_test.go @@ -330,13 +330,6 @@ func TestWithdrawDelegationRewardsBasic(t *testing.T) { // withdraw commission _, err = app.DistrKeeper.WithdrawValidatorCommission(ctx, valAddrs[0]) require.Nil(t, err) - - // assert correct balance - exp = balanceTokens.Sub(valTokens).Add(initial) - require.Equal(t, - sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, exp)}, - app.BankKeeper.GetAllBalances(ctx, sdk.AccAddress(valAddrs[0])), - ) } func TestCalculateRewardsAfterManySlashesInSameBlock(t *testing.T) { diff --git a/x/distribution/simulation/operations.go b/x/distribution/simulation/operations.go index 9d898d07f9c..89a667777f5 100644 --- a/x/distribution/simulation/operations.go +++ b/x/distribution/simulation/operations.go @@ -229,7 +229,6 @@ func SimulateMsgFundCommunityPool(ak types.AccountKeeper, bk types.BankKeeper, k msg := types.NewMsgFundCommunityPool(fundAmount, funder.Address) txCtx := simulation.OperationInput{ - R: r, App: app, TxGen: simappparams.MakeTestEncodingConfig().TxConfig, Cdc: nil, diff --git a/x/genutil/gentx_test.go b/x/genutil/gentx_test.go index bf0a6ff7c07..a6e636f84f1 100644 --- a/x/genutil/gentx_test.go +++ b/x/genutil/gentx_test.go @@ -3,9 +3,7 @@ package genutil_test import ( "encoding/json" "fmt" - "math/rand" "testing" - "time" "github.com/stretchr/testify/suite" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -235,7 +233,6 @@ func (suite *GenTxTestSuite) TestDeliverGenTxs() { msg := banktypes.NewMsgSend(addr1, addr2, sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 1)}) tx, err := helpers.GenTx( -
rand.New(rand.NewSource(time.Now().UnixNano())), suite.encodingConfig.TxConfig, []sdk.Msg{msg}, sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 10)}, diff --git a/x/gov/simulation/operations.go b/x/gov/simulation/operations.go index d8aecd1e32f..25fc6d9b811 100644 --- a/x/gov/simulation/operations.go +++ b/x/gov/simulation/operations.go @@ -155,7 +155,6 @@ func SimulateMsgSubmitProposal( txGen := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txGen, []sdk.Msg{msg}, fees, @@ -242,7 +241,6 @@ func SimulateMsgDeposit(ak types.AccountKeeper, bk types.BankKeeper, k keeper.Ke } txCtx := simulation.OperationInput{ - R: r, App: app, TxGen: simappparams.MakeTestEncodingConfig().TxConfig, Cdc: nil, diff --git a/x/simulation/util.go b/x/simulation/util.go index 9ca1205411c..beeb009d02b 100644 --- a/x/simulation/util.go +++ b/x/simulation/util.go @@ -101,7 +101,6 @@ func GenAndDeliverTxWithRandFees(txCtx OperationInput) (simtypes.OperationMsg, [ func GenAndDeliverTx(txCtx OperationInput, fees sdk.Coins) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { account := txCtx.AccountKeeper.GetAccount(txCtx.Context, txCtx.SimAccount.Address) tx, err := helpers.GenTx( - txCtx.R, txCtx.TxGen, []sdk.Msg{txCtx.Msg}, fees, diff --git a/x/slashing/simulation/operations.go b/x/slashing/simulation/operations.go index a4bfd93d549..788ae4f90eb 100644 --- a/x/slashing/simulation/operations.go +++ b/x/slashing/simulation/operations.go @@ -88,7 +88,6 @@ func SimulateMsgUnjail(ak types.AccountKeeper, bk types.BankKeeper, k keeper.Kee txGen := simappparams.MakeTestEncodingConfig().TxConfig tx, err := helpers.GenTx( - r, txGen, []sdk.Msg{msg}, fees, diff --git a/x/staking/simulation/operations.go b/x/staking/simulation/operations.go index 2a1c86d635c..6bedbb5315b 100644 --- a/x/staking/simulation/operations.go +++ b/x/staking/simulation/operations.go @@ -152,7 +152,6 @@ func SimulateMsgCreateValidator(ak types.AccountKeeper, bk types.BankKeeper, k k } txCtx := simulation.OperationInput{ - R: r, App: app, TxGen: simappparams.MakeTestEncodingConfig().TxConfig, Cdc: nil, @@ -277,7 +276,6 @@ func SimulateMsgDelegate(ak types.AccountKeeper, bk types.BankKeeper, k keeper.K msg := types.NewMsgDelegate(simAccount.Address, val.GetOperator(), bondAmt) txCtx := simulation.OperationInput{ - R: r, App: app, TxGen: simappparams.MakeTestEncodingConfig().TxConfig, Cdc: nil, diff --git a/x/staking/types/tx.pb.go b/x/staking/types/tx.pb.go index c3a1bf70659..59e3b4ba665 100644 --- a/x/staking/types/tx.pb.go +++ b/x/staking/types/tx.pb.go @@ -240,6 +240,27 @@ func (m *MsgDelegate) XXX_DiscardUnknown() { var xxx_messageInfo_MsgDelegate proto.InternalMessageInfo +func (m *MsgDelegate) GetDelegatorAddress() string { + if m != nil { + return m.DelegatorAddress + } + return "" +} + +func (m *MsgDelegate) GetValidatorAddress() string { + if m != nil { + return m.ValidatorAddress + } + return "" +} + +func (m *MsgDelegate) GetAmount() types1.Coin { + if m != nil { + return m.Amount + } + return types1.Coin{} +} + // MsgDelegateResponse defines the Msg/Delegate response type. 
diff --git a/x/staking/types/tx.pb.go b/x/staking/types/tx.pb.go
index c3a1bf70659..59e3b4ba665 100644
--- a/x/staking/types/tx.pb.go
+++ b/x/staking/types/tx.pb.go
@@ -240,6 +240,27 @@ func (m *MsgDelegate) XXX_DiscardUnknown() {

 var xxx_messageInfo_MsgDelegate proto.InternalMessageInfo

+func (m *MsgDelegate) GetDelegatorAddress() string {
+	if m != nil {
+		return m.DelegatorAddress
+	}
+	return ""
+}
+
+func (m *MsgDelegate) GetValidatorAddress() string {
+	if m != nil {
+		return m.ValidatorAddress
+	}
+	return ""
+}
+
+func (m *MsgDelegate) GetAmount() types1.Coin {
+	if m != nil {
+		return m.Amount
+	}
+	return types1.Coin{}
+}
+
 // MsgDelegateResponse defines the Msg/Delegate response type.
 type MsgDelegateResponse struct {
 }

@@ -319,6 +340,34 @@ func (m *MsgBeginRedelegate) XXX_DiscardUnknown() {

 var xxx_messageInfo_MsgBeginRedelegate proto.InternalMessageInfo

+func (m *MsgBeginRedelegate) GetDelegatorAddress() string {
+	if m != nil {
+		return m.DelegatorAddress
+	}
+	return ""
+}
+
+func (m *MsgBeginRedelegate) GetValidatorSrcAddress() string {
+	if m != nil {
+		return m.ValidatorSrcAddress
+	}
+	return ""
+}
+
+func (m *MsgBeginRedelegate) GetValidatorDstAddress() string {
+	if m != nil {
+		return m.ValidatorDstAddress
+	}
+	return ""
+}
+
+func (m *MsgBeginRedelegate) GetAmount() types1.Coin {
+	if m != nil {
+		return m.Amount
+	}
+	return types1.Coin{}
+}
+
 // MsgBeginRedelegateResponse defines the Msg/BeginRedelegate response type.
 type MsgBeginRedelegateResponse struct {
 	CompletionTime time.Time `protobuf:"bytes,1,opt,name=completion_time,json=completionTime,proto3,stdtime" json:"completion_time"`
 }

@@ -405,6 +454,27 @@ func (m *MsgUndelegate) XXX_DiscardUnknown() {

 var xxx_messageInfo_MsgUndelegate proto.InternalMessageInfo

+func (m *MsgUndelegate) GetDelegatorAddress() string {
+	if m != nil {
+		return m.DelegatorAddress
+	}
+	return ""
+}
+
+func (m *MsgUndelegate) GetValidatorAddress() string {
+	if m != nil {
+		return m.ValidatorAddress
+	}
+	return ""
+}
+
+func (m *MsgUndelegate) GetAmount() types1.Coin {
+	if m != nil {
+		return m.Amount
+	}
+	return types1.Coin{}
+}
+
 // MsgUndelegateResponse defines the Msg/Undelegate response type.
 type MsgUndelegateResponse struct {
 	CompletionTime time.Time `protobuf:"bytes,1,opt,name=completion_time,json=completionTime,proto3,stdtime" json:"completion_time"`
 }

@@ -466,61 +536,61 @@ func init() {
 func init() {
 	proto.RegisterFile("cosmos/staking/v1beta1/tx.proto", fileDescriptor_0926ef28816b35ab)
 }

 var fileDescriptor_0926ef28816b35ab = []byte{
-	// 860 bytes of a gzipped FileDescriptorProto
+	// 861 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0x4d, 0x6b, 0xe3, 0x46,
 	0x18, 0xb6, 0x6c, 0xc7, 0x4d, 0x27, 0xe4, 0x4b, 0xf9, 0xc0, 0x11, 0xc1, 0x0a, 0x4a, 0x3f, 0x42,
 	0xdb, 0xc8, 0x4d, 0x4a, 0x29, 0xe4, 0x52, 0xe2, 0xb8, 0xa1, 0x21, 0x35, 0x14, 0x25, 0xed, 0xa1,
 	0x14, 0x8c, 0x3e, 0xc6, 0xaa, 0xb0, 0xa4, 0x51, 0x34, 0xe3, 0x10, 0x43, 0x7f, 0x40, 0x8f, 0x81,
-	0xde, 0xf6, 0x94, 0x1f, 0xb1, 0x3f, 0x22, 0x2c, 0xec, 0x92, 0xe3, 0xb2, 0x07, 0xef, 0x92, 0xc0,
-	0x92, 0xb3, 0x7f, 0xc1, 0xa2, 0xd1, 0x48, 0x96, 0xe5, 0x0f, 0x4c, 0x58, 0x5f, 0xf6, 0x64, 0x33,
-	0xf3, 0xcc, 0xf3, 0xce, 0xfb, 0xbc, 0xcf, 0xbc, 0xaf, 0x80, 0xa8, 0x23, 0xec, 0x20, 0x5c, 0xc6,
-	0x44, 0x6d, 0x5a, 0xae, 0x59, 0xbe, 0xdc, 0xd3, 0x20, 0x51, 0xf7, 0xca, 0xe4, 0x4a, 0xf6, 0x7c,
-	0x44, 0x10, 0xbf, 0x1e, 0x02, 0x64, 0x06, 0x90, 0x19, 0x40, 0xd8, 0x30, 0x11, 0x32, 0x6d, 0x58,
-	0xa6, 0x28, 0xad, 0xd5, 0x28, 0xab, 0x6e, 0x3b, 0x3c, 0x22, 0x88, 0xe9, 0x2d, 0x62, 0x39, 0x10,
-	0x13, 0xd5, 0xf1, 0x18, 0x60, 0xd5, 0x44, 0x26, 0xa2, 0x7f, 0xcb, 0xc1, 0x3f, 0xb6, 0xba, 0x11,
-	0x46, 0xaa, 0x87, 0x1b, 0x2c, 0x6c, 0xb8, 0x55, 0x62, 0xb7, 0xd4, 0x54, 0x0c, 0xe3, 0x2b, 0xea,
-	0xc8, 0x72, 0xd9, 0xfe, 0x17, 0x23, 0xb2, 0x88, 0x2e, 0x4d, 0x51, 0xd2, 0xcb, 0x3c, 0xe0, 0x6b,
-	0xd8, 0x3c, 0xf2, 0xa1, 0x4a, 0xe0, 0x9f, 0xaa, 0x6d, 0x19, 0x2a, 0x41, 0x3e, 0x7f, 0x0a, 0xe6,
-	0x0c, 0x88, 0x75, 0xdf, 0xf2, 0x88, 0x85, 0xdc, 0x22, 0xb7, 0xc5, 0xed, 0xcc, 0xed, 0x6f, 0xcb,
-	0xc3, 0xf3, 0x96, 0xab, 0x3d, 0x68, 0x25, 0x7f, 0xdb, 0x11, 0x33, 0x4a, 0xf2, 0x34, 0x5f, 0x03,
-	0x40, 0x47, 0x8e, 0x63, 0x61, 0x1c, 0x70, 0x65, 0x29, 0xd7, 0xd7, 0xa3, 0xb8, 0x8e, 0x62, 0xa4,
-	0xa2, 0x12, 0x88, 0x19, 0x5f, 0x82, 0x80, 0xff, 0x17, 0xac, 0x38, 0x96, 0x5b, 0xc7, 0xd0, 0x6e,
-	0xd4, 0x0d, 0x68, 0x43, 0x53, 0xa5, 0x77, 0xcc, 0x6d, 0x71, 0x3b, 0x9f, 0x57, 0x7e, 0x0b, 0xe0,
-	0x6f, 0x3a, 0xe2, 0x57, 0xa6, 0x45, 0xfe, 0x69, 0x69, 0xb2, 0x8e, 0x1c, 0x26, 0x1b, 0xfb, 0xd9,
-	0xc5, 0x46, 0xb3, 0x4c, 0xda, 0x1e, 0xc4, 0xf2, 0x89, 0x4b, 0xba, 0x1d, 0x51, 0x68, 0xab, 0x8e,
-	0x7d, 0x20, 0x0d, 0xa1, 0x94, 0x94, 0x65, 0xc7, 0x72, 0xcf, 0xa0, 0xdd, 0xa8, 0xc6, 0x6b, 0xfc,
-	0x09, 0x58, 0x66, 0x08, 0xe4, 0xd7, 0x55, 0xc3, 0xf0, 0x21, 0xc6, 0xc5, 0x3c, 0x8d, 0xbd, 0xd9,
-	0xed, 0x88, 0xc5, 0x90, 0x6d, 0x00, 0x22, 0x29, 0x4b, 0xf1, 0xda, 0x61, 0xb8, 0x14, 0x50, 0x5d,
-	0x46, 0x8a, 0xc7, 0x54, 0x33, 0x69, 0xaa, 0x01, 0x88, 0xa4, 0x2c, 0xc5, 0x6b, 0x11, 0xd5, 0x31,
-	0x28, 0x78, 0x2d, 0xad, 0x09, 0xdb, 0xc5, 0x02, 0x95, 0x77, 0x55, 0x0e, 0xfd, 0x26, 0x47, 0x7e,
-	0x93, 0x0f, 0xdd, 0x76, 0xa5, 0xf8, 0xe2, 0xf9, 0xee, 0x2a, 0xd3, 0x5d, 0xf7, 0xdb, 0x1e, 0x41,
-	0xf2, 0xef, 0x2d, 0xed, 0x14, 0xb6, 0x15, 0x76, 0x9a, 0xff, 0x11, 0xcc, 0x5c, 0xaa, 0x76, 0x0b,
-	0x16, 0x3f, 0xa3, 0x34, 0x1b, 0x51, 0x95, 0x02, 0x93, 0x25, 0x4a, 0x64, 0x45, 0x75, 0x0e, 0xd1,
-	0x07, 0xb3, 0xff, 0xdd, 0x88, 0x99, 0xc7, 0x1b, 0x31, 0x23, 0x6d, 0x02, 0x61, 0xd0, 0x4e, 0x0a,
-	0xc4, 0x1e, 0x72, 0x31, 0x94, 0xfe, 0xcf, 0x81, 0xa5, 0x1a, 0x36, 0x7f, 0x31, 0x2c, 0x32, 0x25,
-	0xaf, 0xfd, 0x3c, 0x4c, 0xd3, 0x2c, 0xd5, 0x94, 0xef, 0x76, 0xc4, 0x85, 0x50, 0xd3, 0x31, 0x4a,
-	0x3a, 0x60, 0xb1, 0xe7, 0xb5, 0xba, 0xaf, 0x12, 0xc8, 0x9c, 0x55, 0x9d, 0xd0, 0x55, 0x55, 0xa8,
-	0x77, 0x3b, 0xe2, 0x7a, 0x18, 0x28, 0x45, 0x25, 0x29, 0x0b, 0x7a, 0x9f, 0xbf, 0xf9, 0xab, 0xe1,
-	0x66, 0x0e, 0x0d, 0xf5, 0xeb, 0x14, 0x8d, 0x9c, 0xa8, 0x99, 0x00, 0x8a, 0xe9, 0xa2, 0xc4, 0x15,
-	0x7b, 0xcf, 0x81, 0xb9, 0x1a, 0x36, 0xd9, 0x39, 0x38, 0xdc, 0xfe, 0xdc, 0xc7, 0xb3, 0x7f, 0xf6,
-	0x49, 0xf6, 0xff, 0x09, 0x14, 0x54, 0x07, 0xb5, 0x5c, 0x42, 0x6b, 0x35, 0x81, 0x6f, 0x19, 0x3c,
-	0x21, 0xc2, 0x1a, 0x58, 0x49, 0xe4, 0x19, 0xe7, 0xff, 0x2a, 0x4b, 0xfb, 0x63, 0x05, 0x9a, 0x96,
-	0xab, 0x40, 0x63, 0x0a, 0x32, 0x9c, 0x83, 0xb5, 0x5e, 0x8e, 0xd8, 0xd7, 0x53, 0x52, 0x6c, 0x75,
-	0x3b, 0xe2, 0x66, 0x5a, 0x8a, 0x04, 0x4c, 0x52, 0x56, 0xe2, 0xf5, 0x33, 0x5f, 0x1f, 0xca, 0x6a,
-	0x60, 0x12, 0xb3, 0xe6, 0x46, 0xb3, 0x26, 0x60, 0x49, 0xd6, 0x2a, 0x26, 0x83, 0x3a, 0xe7, 0x9f,
-	0xaa, 0x73, 0x93, 0x36, 0x88, 0x94, 0x9e, 0x91, 0xdc, 0x7c, 0x8d, 0xbe, 0x3e, 0xcf, 0x86, 0x81,
-	0x45, 0xeb, 0xc1, 0x8c, 0x64, 0xfd, 0x40, 0x18, 0x68, 0x68, 0xe7, 0xd1, 0x00, 0xad, 0xcc, 0x06,
-	0xa1, 0xae, 0xdf, 0x8a, 0x1c, 0x7d, 0x5d, 0xec, 0x70, 0xb0, 0x2d, 0x3d, 0x72, 0x60, 0xbe, 0x86,
-	0xcd, 0x3f, 0x5c, 0xe3, 0x93, 0xf7, 0x6f, 0x03, 0xac, 0xf5, 0x65, 0x3a, 0x25, 0x49, 0xf7, 0x9f,
-	0xe5, 0x41, 0xae, 0x86, 0x4d, 0xfe, 0x02, 0x2c, 0xa6, 0x3f, 0x1a, 0xbe, 0x19, 0xd5, 0xb3, 0x07,
-	0x27, 0x82, 0xb0, 0x3f, 0x39, 0x36, 0xce, 0xa4, 0x09, 0xe6, 0xfb, 0x27, 0xc7, 0xce, 0x18, 0x92,
-	0x3e, 0xa4, 0xf0, 0xfd, 0xa4, 0xc8, 0x38, 0xd8, 0xdf, 0x60, 0x36, 0x6e, 0x7a, 0xdb, 0x63, 0x4e,
-	0x47, 0x20, 0xe1, 0xdb, 0x09, 0x40, 0x31, 0xfb, 0x05, 0x58, 0x4c, 0xb7, 0x94, 0x71, 0xea, 0xa5,
-	0xb0, 0x63, 0xd5, 0x1b, 0xf5, 0xb4, 0x34, 0x00, 0x12, 0xef, 0xe0, 0xcb, 0x31, 0x0c, 0x3d, 0x98,
-	0xb0, 0x3b, 0x11, 0x2c, 0x8a, 0x51, 0x39, 0xbe, 0xbd, 0x2f, 0x71, 0x77, 0xf7, 0x25, 0xee, 0xdd,
-	0x7d, 0x89, 0xbb, 0x7e, 0x28, 0x65, 0xee, 0x1e, 0x4a, 0x99, 0xd7, 0x0f, 0xa5, 0xcc, 0x5f, 0xdf,
-	0x8d, 0x1d, 0x63, 0x57, 0xf1, 0x57, 0x2a, 0x1d, 0x68, 0x5a, 0x81, 0x5a, 0xf2, 0x87, 0x0f, 0x01,
-	0x00, 0x00, 0xff, 0xff, 0xa1, 0x2b, 0xfd, 0x07, 0x8a, 0x0b, 0x00, 0x00,
+	0xde, 0x7a, 0xca, 0x8f, 0xe8, 0x8f, 0x08, 0x81, 0x85, 0x1c, 0x97, 0x3d, 0x78, 0x17, 0x67, 0x0f,
+	0x7b, 0xf6, 0x2f, 0x58, 0x34, 0x1a, 0xc9, 0xb2, 0xfc, 0x81, 0x09, 0xeb, 0xcb, 0x9e, 0x6c, 0x66,
+	0x9e, 0x79, 0xde, 0x79, 0x9f, 0xf7, 0x99, 0xf7, 0x15, 0x10, 0x75, 0x84, 0x1d, 0x84, 0xcb, 0x98,
+	0xa8, 0x4d, 0xcb, 0x35, 0xcb, 0xd7, 0x07, 0x1a, 0x24, 0xea, 0x41, 0x99, 0xdc, 0xc8, 0x9e, 0x8f,
+	0x08, 0x5b, 0x26, 0x42, 0xa6, 0x0d, 0xcb, 0x14, 0xa5, 0xb5, 0x1a, 0x65, 0xd5, 0x6d, 0x87, 0x47,
+	0x04, 0x31, 0xbd, 0x45, 0x2c, 0x07, 0x62, 0xa2, 0x3a, 0x1e, 0x03, 0xac, 0x9b, 0xc8, 0x44, 0xf4,
+	0x6f, 0x39, 0xf8, 0xc7, 0x56, 0xb7, 0xc2, 0x48, 0xf5, 0x70, 0x83, 0x85, 0x0d, 0xb7, 0x4a, 0xec,
+	0x96, 0x9a, 0x8a, 0x61, 0x7c, 0x45, 0x1d, 0x59, 0x2e, 0xdb, 0xff, 0x6c, 0x4c, 0x16, 0xd1, 0xa5,
+	0x29, 0x4a, 0x7a, 0x91, 0x07, 0x7c, 0x0d, 0x9b, 0x27, 0x3e, 0x54, 0x09, 0xfc, 0x5d, 0xb5, 0x2d,
+	0x43, 0x25, 0xc8, 0xe7, 0xcf, 0xc1, 0x82, 0x01, 0xb1, 0xee, 0x5b, 0x1e, 0xb1, 0x90, 0x5b, 0xe4,
+	0x76, 0xb8, 0xbd, 0x85, 0xc3, 0x5d, 0x79, 0x74, 0xde, 0x72, 0xb5, 0x0f, 0xad, 0xe4, 0xef, 0x3b,
+	0x62, 0x46, 0x49, 0x9e, 0xe6, 0x6b, 0x00, 0xe8, 0xc8, 0x71, 0x2c, 0x8c, 0x03, 0xae, 0x2c, 0xe5,
+	0xfa, 0x72, 0x1c, 0xd7, 0x49, 0x8c, 0x54, 0x54, 0x02, 0x31, 0xe3, 0x4b, 0x10, 0xf0, 0x7f, 0x83,
+	0x35, 0xc7, 0x72, 0xeb, 0x18, 0xda, 0x8d, 0xba, 0x01, 0x6d, 0x68, 0xaa, 0xf4, 0x8e, 0xb9, 0x1d,
+	0x6e, 0xef, 0xd3, 0xca, 0x2f, 0x01, 0xfc, 0x55, 0x47, 0xfc, 0xc2, 0xb4, 0xc8, 0x5f, 0x2d, 0x4d,
+	0xd6, 0x91, 0xc3, 0x64, 0x63, 0x3f, 0xfb, 0xd8, 0x68, 0x96, 0x49, 0xdb, 0x83, 0x58, 0x3e, 0x73,
+	0x49, 0xaf, 0x23, 0x0a, 0x6d, 0xd5, 0xb1, 0x8f, 0xa4, 0x11, 0x94, 0x92, 0xb2, 0xea, 0x58, 0xee,
+	0x05, 0xb4, 0x1b, 0xd5, 0x78, 0x8d, 0x3f, 0x03, 0xab, 0x0c, 0x81, 0xfc, 0xba, 0x6a, 0x18, 0x3e,
+	0xc4, 0xb8, 0x98, 0xa7, 0xb1, 0xb7, 0x7b, 0x1d, 0xb1, 0x18, 0xb2, 0x0d, 0x41, 0x24, 0x65, 0x25,
+	0x5e, 0x3b, 0x0e, 0x97, 0x02, 0xaa, 0xeb, 0x48, 0xf1, 0x98, 0x6a, 0x2e, 0x4d, 0x35, 0x04, 0x91,
+	0x94, 0x95, 0x78, 0x2d, 0xa2, 0x3a, 0x05, 0x05, 0xaf, 0xa5, 0x35, 0x61, 0xbb, 0x58, 0xa0, 0xf2,
+	0xae, 0xcb, 0xa1, 0xdf, 0xe4, 0xc8, 0x6f, 0xf2, 0xb1, 0xdb, 0xae, 0x14, 0x1f, 0xfe, 0xdf, 0x5f,
+	0x67, 0xba, 0xeb, 0x7e, 0xdb, 0x23, 0x48, 0xfe, 0xb5, 0xa5, 0x9d, 0xc3, 0xb6, 0xc2, 0x4e, 0xf3,
+	0xdf, 0x83, 0xb9, 0x6b, 0xd5, 0x6e, 0xc1, 0xe2, 0x27, 0x94, 0x66, 0x2b, 0xaa, 0x52, 0x60, 0xb2,
+	0x44, 0x89, 0xac, 0xa8, 0xce, 0x21, 0xfa, 0x68, 0xfe, 0x9f, 0x3b, 0x31, 0xf3, 0xee, 0x4e, 0xcc,
+	0x48, 0xdb, 0x40, 0x18, 0xb6, 0x93, 0x02, 0xb1, 0x87, 0x5c, 0x0c, 0xa5, 0x7f, 0x73, 0x60, 0xa5,
+	0x86, 0xcd, 0x9f, 0x0c, 0x8b, 0xcc, 0xc8, 0x6b, 0x3f, 0x8e, 0xd2, 0x34, 0x4b, 0x35, 0xe5, 0x7b,
+	0x1d, 0x71, 0x29, 0xd4, 0x74, 0x82, 0x92, 0x0e, 0x58, 0xee, 0x7b, 0xad, 0xee, 0xab, 0x04, 0x32,
+	0x67, 0x55, 0xa7, 0x74, 0x55, 0x15, 0xea, 0xbd, 0x8e, 0xb8, 0x19, 0x06, 0x4a, 0x51, 0x49, 0xca,
+	0x92, 0x3e, 0xe0, 0x6f, 0xfe, 0x66, 0xb4, 0x99, 0x43, 0x43, 0xfd, 0x3c, 0x43, 0x23, 0x27, 0x6a,
+	0x26, 0x80, 0x62, 0xba, 0x28, 0x71, 0xc5, 0xba, 0x1c, 0x58, 0xa8, 0x61, 0x93, 0x9d, 0x83, 0xa3,
+	0xed, 0xcf, 0x7d, 0x38, 0xfb, 0x67, 0x9f, 0x65, 0xff, 0x1f, 0x40, 0x41, 0x75, 0x50, 0xcb, 0x25,
+	0xb4, 0x56, 0x53, 0xf8, 0x96, 0xc1, 0x8f, 0xf2, 0x54, 0x80, 0x0d, 0xb0, 0x96, 0xc8, 0x31, 0xce,
+	0xfd, 0x21, 0x4b, 0x7b, 0x63, 0x05, 0x9a, 0x96, 0xab, 0x40, 0x63, 0x06, 0x12, 0x5c, 0x82, 0x8d,
+	0x7e, 0x7e, 0xd8, 0xd7, 0x53, 0x32, 0xec, 0xf4, 0x3a, 0xe2, 0x76, 0x5a, 0x86, 0x04, 0x4c, 0x52,
+	0xd6, 0xe2, 0xf5, 0x0b, 0x5f, 0x1f, 0xc9, 0x6a, 0x60, 0x12, 0xb3, 0xe6, 0xc6, 0xb3, 0x26, 0x60,
+	0x49, 0xd6, 0x2a, 0x26, 0xc3, 0x1a, 0xe7, 0x9f, 0xa3, 0x71, 0x93, 0x36, 0x86, 0x94, 0x96, 0x91,
+	0xd4, 0x7c, 0x8d, 0xbe, 0x3a, 0xcf, 0x86, 0x81, 0x35, 0xeb, 0xc1, 0x6c, 0x64, 0x7d, 0x40, 0x18,
+	0x6a, 0x64, 0x97, 0xd1, 0xe0, 0xac, 0xcc, 0x07, 0x61, 0x6e, 0x5f, 0x8b, 0x1c, 0x7d, 0x55, 0xec,
+	0x70, 0xb0, 0x2d, 0xbd, 0xe5, 0xc0, 0x62, 0x0d, 0x9b, 0xbf, 0xb9, 0xc6, 0x47, 0xed, 0xdb, 0x06,
+	0xd8, 0x18, 0xc8, 0x72, 0x46, 0x72, 0x1e, 0xfe, 0x97, 0x07, 0xb9, 0x1a, 0x36, 0xf9, 0x2b, 0xb0,
+	0x9c, 0xfe, 0x50, 0xf8, 0x6a, 0x5c, 0x9f, 0x1e, 0x9e, 0x02, 0xc2, 0xe1, 0xf4, 0xd8, 0x38, 0x93,
+	0x26, 0x58, 0x1c, 0x9c, 0x16, 0x7b, 0x13, 0x48, 0x06, 0x90, 0xc2, 0xb7, 0xd3, 0x22, 0xe3, 0x60,
+	0x7f, 0x82, 0xf9, 0xb8, 0xd1, 0xed, 0x4e, 0x38, 0x1d, 0x81, 0x84, 0xaf, 0xa7, 0x00, 0xc5, 0xec,
+	0x57, 0x60, 0x39, 0xdd, 0x4a, 0x26, 0xa9, 0x97, 0xc2, 0x4e, 0x54, 0x6f, 0xdc, 0xb3, 0xd2, 0x00,
+	0x48, 0xbc, 0x81, 0xcf, 0x27, 0x30, 0xf4, 0x61, 0xc2, 0xfe, 0x54, 0xb0, 0x28, 0x46, 0xe5, 0xf4,
+	0xbe, 0x5b, 0xe2, 0x1e, 0xbb, 0x25, 0xee, 0x4d, 0xb7, 0xc4, 0xdd, 0x3e, 0x95, 0x32, 0x8f, 0x4f,
+	0xa5, 0xcc, 0xcb, 0xa7, 0x52, 0xe6, 0x8f, 0x6f, 0x26, 0x8e, 0xae, 0x9b, 0xf8, 0xcb, 0x94, 0x0e,
+	0x31, 0xad, 0x40, 0x2d, 0xf9, 0xdd, 0xfb,
+	0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xdc, 0x0e, 0x15, 0x7e, 0x0b, 0x00, 0x00,
 }

 // Reference imports to suppress errors if they are not otherwise used.
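The regenerated `x/staking/types/tx.pb.go` above mainly restores the standard gogoproto accessors (`GetDelegatorAddress`, `GetValidatorAddress`, `GetAmount`, ...). Their point is nil-safety: fields can be read through a nil message without panicking. A hand-written illustration of the same pattern (stand-in type, not the generated SDK code):

```go
package main

import "fmt"

// msg mimics a generated protobuf message with one accessor per field.
type msg struct {
	DelegatorAddress string
}

// GetDelegatorAddress follows the generated-code convention: check for a nil
// receiver and fall back to the zero value, so chained reads never panic.
func (m *msg) GetDelegatorAddress() string {
	if m != nil {
		return m.DelegatorAddress
	}
	return ""
}

func main() {
	var unset *msg // e.g. an optional sub-message that was never populated
	fmt.Printf("%q\n", unset.GetDelegatorAddress()) // "" -- no panic

	set := &msg{DelegatorAddress: "cosmos1..."}
	fmt.Printf("%q\n", set.GetDelegatorAddress()) // "cosmos1..."
}
```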