diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f3c78535d..62e698a83 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,5 +1,6 @@ name: Build on: + merge_group: pull_request: push: branches: diff --git a/.github/workflows/check-proto.yaml b/.github/workflows/check-proto.yaml index c8008f740..a0e2c40b9 100644 --- a/.github/workflows/check-proto.yaml +++ b/.github/workflows/check-proto.yaml @@ -1,6 +1,7 @@ name: Check Proto Generation on: + merge_group: pull_request: push: branches: diff --git a/.github/workflows/check-swagger.yml b/.github/workflows/check-swagger.yml index f4b972aa7..aee464190 100644 --- a/.github/workflows/check-swagger.yml +++ b/.github/workflows/check-swagger.yml @@ -1,6 +1,7 @@ name: Check Proto Swagger Generation on: + merge_group: pull_request: push: branches: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 3e1ea6a4c..53311f8fe 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -17,6 +17,7 @@ permissions: security-events: write on: + merge_group: push: branches: [develop, main, master] pull_request: diff --git a/.github/workflows/consensuswarn.yml b/.github/workflows/consensuswarn.yml index eabe81ac8..4d6462c05 100644 --- a/.github/workflows/consensuswarn.yml +++ b/.github/workflows/consensuswarn.yml @@ -1,6 +1,7 @@ name: "Consensus Warn" on: + merge_group: pull_request_target: types: - opened diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml index 45a8e6ce1..45fd6671b 100644 --- a/.github/workflows/dependencies.yml +++ b/.github/workflows/dependencies.yml @@ -1,6 +1,8 @@ name: "Dependency Review" # only run on pull requests and not any branch. -on: pull_request +on: + merge_group: + pull_request: permissions: contents: read diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index d04a1edfd..1b8bf6d4b 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -1,5 +1,6 @@ name: Docker build for localnet on: + merge_group: pull_request: push: branches: @@ -12,7 +13,7 @@ permissions: contents: read jobs: - build: + docker-localnet-build: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f6b9514fe..ee30cced5 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,5 +1,6 @@ name: Docker build in root directory on: + merge_group: pull_request: push: branches: @@ -12,7 +13,7 @@ permissions: contents: read jobs: - build: + docker-build: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/goreleaser.yml b/.github/workflows/goreleaser.yml index e1bcd53e9..ea2899347 100644 --- a/.github/workflows/goreleaser.yml +++ b/.github/workflows/goreleaser.yml @@ -4,6 +4,7 @@ permissions: contents: write on: + merge_group: push: tags: - "v*.*.*" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a13601988..69bb9a764 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -2,6 +2,7 @@ name: Lint # Lint runs golangci-lint over the entire exocore repository. The `golangci` will pass without # running if no *.{go, mod, sum} files have been changed. 
on: + merge_group: pull_request: push: branches: diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml index 01a888574..acee70725 100644 --- a/.github/workflows/proto.yml +++ b/.github/workflows/proto.yml @@ -2,6 +2,7 @@ name: Protobuf # Protobuf runs buf (https://buf.build/) lint and check-breakage # This workflow is only run when a .proto file has been changed on: + merge_group: pull_request: paths: - "proto/**" diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml index 56a561ef6..71c46a262 100644 --- a/.github/workflows/security.yml +++ b/.github/workflows/security.yml @@ -1,5 +1,6 @@ name: Run Gosec on: + merge_group: pull_request: push: branches: diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml index 2e4f1bb02..e358a8504 100644 --- a/.github/workflows/semgrep.yml +++ b/.github/workflows/semgrep.yml @@ -2,6 +2,7 @@ name: Semgrep permissions: contents: read on: + merge_group: # Scan changed files in PRs, block on new issues only (existing issues ignored) pull_request: {} push: @@ -25,7 +26,7 @@ jobs: steps: - name: Permission issue fix # semgrep for some reason sets the working directory to exocore/exocore - run: git config --global --add safe.directory /__w/exocore/exocore + run: git config --global --add safe.directory /__w/imuachain/imuachain - uses: actions/checkout@v4 - name: Get Diff uses: technote-space/get-diff-action@v6.1.2 diff --git a/.github/workflows/super-linter.yml b/.github/workflows/super-linter.yml index 4ca91c78c..e54b8f801 100644 --- a/.github/workflows/super-linter.yml +++ b/.github/workflows/super-linter.yml @@ -11,6 +11,7 @@ permissions: contents: read on: + merge_group: push: branches: ["develop", "main", "master"] pull_request: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 42592ae99..1a8461c5e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,6 +7,7 @@ # https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ name: Tests on: + merge_group: pull_request: push: branches: @@ -44,3 +45,21 @@ jobs: run: | make test-unit-cover if: env.GIT_DIFF + test-unit-e2e: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v4 + with: + go-version: '1.21' + check-latest: true + - uses: actions/checkout@v4 + - uses: technote-space/get-diff-action@v6.1.2 + with: + PATTERNS: | + **/**.go + go.mod + go.sum + - name: Test e2e cases + run: | + make test-unit-e2e + if: env.GIT_DIFF diff --git a/Makefile b/Makefile index 51962263f..bb536c034 100644 --- a/Makefile +++ b/Makefile @@ -186,7 +186,7 @@ all: build build-all: tools build lint test vulncheck -.PHONY: distclean clean build-all +.PHONY: distclean clean build-all build ############################################################################### ### makTools & Dependencies ### @@ -305,23 +305,35 @@ test-all: test-unit test-race # we want to include all unit tests in the subfolders (tests/e2e/*) # We also want to exclude the testutil folder because it contains only # helper functions for the tests. -PACKAGES_UNIT=$(shell go list ./... | grep -v '/tests/e2e$$' | grep -v 'testutil') +PACKAGES_UNIT=$(shell go list ./... | grep -v '/tests/e2e' | grep -v 'testutil') +PACKAGES_UNIT_E2E=$(shell go list ./... | grep '/tests/e2e') TEST_PACKAGES=./... -TEST_TARGETS := test-unit test-unit-cover test-race +TEST_TARGETS := test-unit test-unit-cover test-race test-unit-e2e test-unit-cover-local test-unit-e2e-local # Test runs-specific rules. 
To add a new test target, just add # a new rule, customise ARGS or TEST_PACKAGES ad libitum, and # append the new rule to the TEST_TARGETS list. -test-unit: ARGS=-timeout=15m -gcflags=all=-l +test-unit: ARGS=-timeout=15m -gcflags=all=-l --tags devmode test-unit: TEST_PACKAGES=$(PACKAGES_UNIT) test-race: ARGS=-race test-race: TEST_PACKAGES=$(PACKAGES_NOSIMULATION) $(TEST_TARGETS): run-tests -test-unit-cover: ARGS=-timeout=15m -coverprofile=cover.out -covermode=atomic -gcflags=all=-l +test-unit-cover: ARGS=-timeout=15m -coverprofile=cover.out -covermode=atomic -gcflags=all=-l --tags devmode test-unit-cover: TEST_PACKAGES=$(PACKAGES_UNIT) +test-unit-e2e: ARGS=-timeout=15m --tags devmode +test-unit-e2e: TEST_PACKAGES=$(PACKAGES_UNIT_E2E) + +test-unit-cover-local: ARGS=-timeout=30m -coverprofile=cover.out -covermode=atomic -gcflags=all=-l --tags 'devmode local' +test-unit-cover-local: TEST_PACKAGES=$(PACKAGES_UNIT) + +test-unit-e2e-local: TEST_OPTION=local +test-unit-e2e-local: ARGS=-timeout=30m --tags devmode +test-unit-e2e-local: TEST_PACKAGES=$(PACKAGES_UNIT_E2E) + + test-e2e: @if [ -z "$(TARGET_VERSION)" ]; then \ echo "Building docker image from local codebase"; \ @@ -335,9 +347,9 @@ test-e2e: run-tests: ifneq (,$(shell which tparse 2>/dev/null)) - go test -mod=readonly -json $(ARGS) $(EXTRA_ARGS) $(TEST_PACKAGES) | tparse + TEST_OPTION=$(TEST_OPTION) go test -mod=readonly -json $(ARGS) $(EXTRA_ARGS) $(TEST_PACKAGES) | tparse else - go test -mod=readonly $(ARGS) $(EXTRA_ARGS) $(TEST_PACKAGES) + TEST_OPTION=$(TEST_OPTION) go test -mod=readonly $(ARGS) $(EXTRA_ARGS) $(TEST_PACKAGES) endif test-import: diff --git a/app/ante/cosmos/sigverify.go b/app/ante/cosmos/sigverify.go index 3d91e1cdd..92986fe28 100644 --- a/app/ante/cosmos/sigverify.go +++ b/app/ante/cosmos/sigverify.go @@ -391,6 +391,8 @@ func (isd IncrementSequenceDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, sim msg := msg.(*oracletypes.MsgCreatePrice) if accAddress, err := sdk.AccAddressFromBech32(msg.Creator); err != nil { return ctx, errors.New("invalid address") + // #nosec G115 // safe conversion + // TODO: define msg.Nonce as uint32 to avoid conversion } else if _, err := isd.oracleKeeper.CheckAndIncreaseNonce(ctx, sdk.ConsAddress(accAddress).String(), msg.FeederID, uint32(msg.Nonce)); err != nil { return ctx, err } @@ -445,6 +447,7 @@ func (vscd ValidateSigCountDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, sim sigCount := 0 for _, pk := range pubKeys { sigCount += CountSubKeys(pk) + // #nosec G115 if uint64(sigCount) > params.TxSigLimit { return ctx, sdkerrors.ErrTooManySignatures.Wrapf("signatures: %d, limit: %d", sigCount, params.TxSigLimit) } diff --git a/app/ante/cosmos/txsize_gas.go b/app/ante/cosmos/txsize_gas.go index b9684cd11..db448c75a 100644 --- a/app/ante/cosmos/txsize_gas.go +++ b/app/ante/cosmos/txsize_gas.go @@ -76,12 +76,14 @@ func (cgts ConsumeTxSizeGasDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, sim } // use stdsignature to mock the size of a full signature + // #nosec G115 simSig := legacytx.StdSignature{ // nolint:staticcheck // this will be removed when proto is ready Signature: simSecp256k1Sig[:], PubKey: pubkey, } sigBz := legacy.Cdc.MustMarshal(simSig) + // #nosec G115 cost := sdk.Gas(len(sigBz) + 6) // If the pubkey is a multi-signature pubkey, then we estimate for the maximum diff --git a/app/app.go b/app/app.go index be960b1c0..c2b972da3 100644 --- a/app/app.go +++ b/app/app.go @@ -916,14 +916,17 @@ func NewExocoreApp( app.mm.SetOrderBeginBlockers( upgradetypes.ModuleName, // to upgrade 
the chain capabilitytypes.ModuleName, // before any module with capabilities like IBC - epochstypes.ModuleName, // to update the epoch - feemarkettypes.ModuleName, // set EIP-1559 gas prices - evmtypes.ModuleName, // stores chain id in memory - slashingtypes.ModuleName, // TODO after reward - evidencetypes.ModuleName, // TODO after reward - stakingtypes.ModuleName, // track historical info - ibcexported.ModuleName, // handles upgrades of chain and hence client - authz.ModuleName, // clear expired approvals + // the oracle module's BeginBlock fills the params cache, so it must run before epochs + // and before any other module that accesses the oracle params + oracleTypes.ModuleName, + epochstypes.ModuleName, // to update the epoch + feemarkettypes.ModuleName, // set EIP-1559 gas prices + evmtypes.ModuleName, // stores chain id in memory + slashingtypes.ModuleName, // TODO after reward + evidencetypes.ModuleName, // TODO after reward + stakingtypes.ModuleName, // track historical info + ibcexported.ModuleName, // handles upgrades of chain and hence client + authz.ModuleName, // clear expired approvals // no-op modules ibctransfertypes.ModuleName, icatypes.ModuleName, @@ -944,7 +947,6 @@ func NewExocoreApp( rewardTypes.ModuleName, exoslashTypes.ModuleName, avsManagerTypes.ModuleName, - oracleTypes.ModuleName, distrtypes.ModuleName, ) diff --git a/app/ethtest_helper.go b/app/ethtest_helper.go index 95b2080bb..ac2f59647 100644 --- a/app/ethtest_helper.go +++ b/app/ethtest_helper.go @@ -204,6 +204,7 @@ func genesisStateWithValSet(codec codec.Codec, genesisState simapp.GenesisState, OperatorAddress: operator.String(), OperatorInfo: operatortypes.OperatorInfo{ EarningsAddr: operator.String(), + ApproveAddr: operator.String(), OperatorMetaInfo: "operator1", Commission: stakingtypes.NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()), }, diff --git a/app/test_helpers.go b/app/test_helpers.go index 46c1a5b99..f937f9a87 100644 --- a/app/test_helpers.go +++ b/app/test_helpers.go @@ -246,6 +246,7 @@ func GenesisStateWithValSet(app *ExocoreApp, genesisState simapp.GenesisState, OperatorInfo: operatortypes.OperatorInfo{ EarningsAddr: operator.String(), OperatorMetaInfo: "operator1", + ApproveAddr: operator.String(), Commission: stakingtypes.NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()), }, }, diff --git a/client/docs/config.json b/client/docs/config.json index f06e98c26..9b8292ea8 100644 --- a/client/docs/config.json +++ b/client/docs/config.json @@ -142,7 +142,9 @@ "url": "./tmp-swagger-gen/exocore/dogfood/v1/query.swagger.json", "operationIds": { "rename": { - "Params": "DogfoodParams" + "Params": "DogfoodParams", + "Validator": "DogfoodValidator", + "Validators": "DogfoodValidators" } } }, diff --git a/client/docs/swagger-ui/swagger.json b/client/docs/swagger-ui/swagger.json index 8bd12cad3..3b0166e5f 100644 --- a/client/docs/swagger-ui/swagger.json +++ b/client/docs/swagger-ui/swagger.json @@ -73,6 +73,111 @@ ] } }, + "/exocore/epochs/v1/epoch/{identifier}": { + "get": { + "summary": "EpochInfo provides the epoch information for the specified identifier.", + "operationId": "EpochInfo", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": { + "epoch": { + "description": "epoch is the EpochInfo for the requested epoch.", + "type": "object", + "properties": { + "identifier": { + "type": "string", + "description": "identifier is the unique identifier of the epoch, typically, the measure\nof time 
used to define the epoch." + }, + "start_time": { + "type": "string", + "format": "date-time", + "description": "start_time of the epoch. it may be in the future, in which case, the epoch\nis not yet active." + }, + "duration": { + "type": "string", + "description": "duration is the time in between epoch i and epoch i+1. for an epoch to\nbe meaningful, the duration should be more than the block time of the chain." + }, + "current_epoch": { + "type": "string", + "format": "int64", + "description": "current_epoch is the number of the currently ongoing epoch, identified by the\nidentifier. the first epoch starts during the first block whose block time\nis greater than or equal to the start_time of the epoch." + }, + "current_epoch_start_time": { + "type": "string", + "format": "date-time", + "description": "current_epoch_start_time is the start time of the current time interval, which\nis (current_epoch_start_time, current_epoch_start_time + duration]. when we say\nthat the timer has \"ticked\", it means that the interval's components are increased\nby the duration. note, however, that only one tick can occur in any given block.\n\nthe above constraint results in a deviation from the \"ideal\" tick time / the wall\nclock time. this is particularly obvious when a chain goes down.\nsay, at t = 13, the interval is (10, 10 + 5 = 15].\nat t = 14, it is unchanged even in a new block, since 10 < t <= 15\nat t = 15, it is unchanged even in a new block, since 10 < t <= 15\nat t = 16, it increases to (15, 20]. but then assume that the chain goes down\nand comes back up at t = 35, producing 1 block each second after that as well.\nat t = 35, the interval increases by 1 tick to (15, 20] (at the block end).\nat t = 36, it increases to (20, 25].\nat t = 37, it increases to (25, 30].\nat t = 38, it increases to (30, 35].\nat t = 39, it increases to (35, 40].\nat t = 40, it is unchanged even in a new block, since 35 < t <= 40.\nat t = 41, it increases to (40, 45]." + }, + "epoch_counting_started": { + "type": "boolean", + "description": "epoch_counting_started is true if the epoch counting has started for this identifier.\nit is independent of the epoch number or the time; it just indicates that at least\none epoch for this identifier has started counting." + }, + "current_epoch_start_height": { + "type": "string", + "format": "int64", + "description": "current_epoch_start_height is the block height at which the current epoch started,\nor the height at which the timer last \"ticked\"." + } + } + }, + "block_time": { + "type": "string", + "format": "date-time", + "description": "block_time is the block time of the query block ctx." + } + }, + "description": "QueryEpochInfoResponse is the response type for the Query/EpochInfo RPC method." 
+ } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "properties": { + "type_url": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + } + } + } + } + } + }, + "parameters": [ + { + "name": "identifier", + "description": "identifier of the epoch for which the information is requested.", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Query" + ] + } + }, "/exocore/epochs/v1/epochs": { "get": { "summary": "EpochInfos provide a list of currently running epochs.", @@ -17077,41 +17182,47 @@ }, "/exocore/dogfood/v1/validator/{cons_addr}": { "get": { - "summary": "QueryValidator queries the validator for the given consensus address.", - "operationId": "QueryValidator", + "summary": "Validator queries the validator for the given consensus address. This is the bare-minimum validator:\npublic key, consensus address, power. No other params such as commission, jailed, etc. are included.", + "operationId": "DogfoodValidator", "responses": { "200": { "description": "A successful response.", "schema": { "type": "object", "properties": { - "address": { - "type": "string", - "format": "byte", - "description": "The address, as derived from the consensus key. It has no relation\nwith the operator's account address." - }, - "power": { - "type": "string", - "format": "int64", - "title": "Last known power" - }, - "pubkey": { + "validator": { + "description": "validator is the validator being queried.", "type": "object", "properties": { - "type_url": { + "address": { "type": "string", - "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + "format": "byte", + "description": "The address, as derived from the consensus key. It has no relation\nwith the operator's account address." }, - "value": { + "power": { "type": "string", - "format": "byte", - "description": "Must be a valid serialized protocol buffer of the above specified type." 
+ "format": "int64", + "title": "Last known power" + }, + "pubkey": { + "type": "object", + "properties": { + "type_url": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + }, + "value": { + "type": "string", + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." + } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\nExample 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\nExample 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := &pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } - }, - "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\nExample 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\nExample 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := &pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } } }, - "description": "ExocoreValidator is a validator that is part of the Exocore network. It is\nused to validate and sign blocks." + "description": "QueryValidatorResponse is response type for the Query/Validator RPC method." } }, "default": { @@ -17165,6 +17276,153 @@ ] } }, + "/exocore/dogfood/v1/validators": { + "get": { + "summary": "Validators queries all validators. This is the bare-minimum validator: public key, consensus address, power.\nNo other params such as commission, jailed, etc. are included.", + "operationId": "DogfoodValidators", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": { + "validators": { + "type": "array", + "items": { + "type": "object", + "properties": { + "address": { + "type": "string", + "format": "byte", + "description": "The address, as derived from the consensus key. It has no relation\nwith the operator's account address." + }, + "power": { + "type": "string", + "format": "int64", + "title": "Last known power" + }, + "pubkey": { + "type": "object", + "properties": { + "type_url": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. 
The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + }, + "value": { + "type": "string", + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." + } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\nExample 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\nExample 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := &pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + }, + "description": "ExocoreValidator is a validator that is part of the Exocore network. It is\nused to validate and sign blocks." 
+ }, + "description": "validators is the list of all validators." + }, + "pagination": { + "description": "pagination defines the pagination in the response.", + "type": "object", + "properties": { + "next_key": { + "type": "string", + "format": "byte", + "description": "next_key is the key to be passed to PageRequest.key to\nquery the next page most efficiently. It will be empty if\nthere are no more results." + }, + "total": { + "type": "string", + "format": "uint64", + "title": "total is total number of results available if PageRequest.count_total\nwas set, its value is undefined otherwise" + } + } + } + }, + "description": "QueryAllValidatorsResponse is response type for the Query/AllValidators RPC method." + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "properties": { + "type_url": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + }, + "value": { + "type": "string", + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." 
+ } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\nExample 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\nExample 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := &pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } + } + } + } + }, + "parameters": [ + { + "name": "pagination.key", + "description": "key is a value returned in PageResponse.next_key to begin\nquerying the next page most efficiently. Only one of offset or key\nshould be set.", + "in": "query", + "required": false, + "type": "string", + "format": "byte" + }, + { + "name": "pagination.offset", + "description": "offset is a numeric offset that can be used when key is unavailable.\nIt is less efficient than using key. Only one of offset or key should\nbe set.", + "in": "query", + "required": false, + "type": "string", + "format": "uint64" + }, + { + "name": "pagination.limit", + "description": "limit is the total number of results to be returned in the result page.\nIf left empty it will default to a value to be set by each app.", + "in": "query", + "required": false, + "type": "string", + "format": "uint64" + }, + { + "name": "pagination.count_total", + "description": "count_total is set to true to indicate that the result set should include\na count of the total number of items available for pagination in UIs.\ncount_total is only respected when offset is used. 
It is ignored when key\nis set.", + "in": "query", + "required": false, + "type": "boolean" + }, + { + "name": "pagination.reverse", + "description": "reverse is set to true if results are to be returned in the descending order.\n\nSince: cosmos-sdk 0.43", + "in": "query", + "required": false, + "type": "boolean" + } + ], + "tags": [ + "Query" + ] + } + }, "/exocore/assets/v1/all_assets": { "get": { "summary": "AllStakingAssetsInfo queries all staking assets info.", @@ -18574,11 +18832,6 @@ "type": "string", "title": "oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior" }, - "slash_fraction_miss": { - "type": "string", - "format": "byte", - "title": "slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price" - }, "slash_fraction_malicious": { "type": "string", "format": "byte", @@ -19321,11 +19574,6 @@ "type": "string", "title": "oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior" }, - "slash_fraction_miss": { - "type": "string", - "format": "byte", - "title": "slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price" - }, "slash_fraction_malicious": { "type": "string", "format": "byte", @@ -19693,11 +19941,6 @@ "type": "string", "title": "oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior" }, - "slash_fraction_miss": { - "type": "string", - "format": "byte", - "title": "slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price" - }, "slash_fraction_malicious": { "type": "string", "format": "byte", @@ -19774,6 +20017,11 @@ "schema": { "type": "object", "properties": { + "version": { + "type": "string", + "format": "int64", + "title": "version of the staker validator list changes" + }, "staker_info": { "title": "all staker infos under the specified asset", "type": "object", @@ -19907,6 +20155,11 @@ "schema": { "type": "object", "properties": { + "version": { + "type": "string", + "format": "int64", + "title": "version of the staker validator list changes" + }, "staker_infos": { "type": "array", "items": { @@ -20091,6 +20344,11 @@ "schema": { "type": "object", "properties": { + "version": { + "type": "string", + "format": "int64", + "title": "version of the staker validator list changes" + }, "staker_list": { "title": "staker list including all stakers of request asset", "type": "object", @@ -22880,6 +23138,11 @@ "type": "string", "format": "int64", "title": "undelegation_filter_height records the height before which undelegations are not slashed" + }, + "historical_voting_power": { + "type": "string", + "format": "int64", + "description": "the historical voting power at the time of the slash event." } } } @@ -24639,6 +24902,55 @@ }, "description": "QueryCurrentEpochResponse is the response type for the Query/EpochInfos RPC\nmethod." }, + "exocore.epochs.v1.QueryEpochInfoResponse": { + "type": "object", + "properties": { + "epoch": { + "description": "epoch is the EpochInfo for the requested epoch.", + "type": "object", + "properties": { + "identifier": { + "type": "string", + "description": "identifier is the unique identifier of the epoch, typically, the measure\nof time used to define the epoch." + }, + "start_time": { + "type": "string", + "format": "date-time", + "description": "start_time of the epoch. it may be in the future, in which case, the epoch\nis not yet active." 
+ }, + "duration": { + "type": "string", + "description": "duration is the time in between epoch i and epoch i+1. for an epoch to\nbe meaningful, the duration should be more than the block time of the chain." + }, + "current_epoch": { + "type": "string", + "format": "int64", + "description": "current_epoch is the number of the currently ongoing epoch, identified by the\nidentifier. the first epoch starts during the first block whose block time\nis greater than or equal to the start_time of the epoch." + }, + "current_epoch_start_time": { + "type": "string", + "format": "date-time", + "description": "current_epoch_start_time is the start time of the current time interval, which\nis (current_epoch_start_time, current_epoch_start_time + duration]. when we say\nthat the timer has \"ticked\", it means that the interval's components are increased\nby the duration. note, however, that only one tick can occur in any given block.\n\nthe above constraint results in a deviation from the \"ideal\" tick time / the wall\nclock time. this is particularly obvious when a chain goes down.\nsay, at t = 13, the interval is (10, 10 + 5 = 15].\nat t = 14, it is unchanged even in a new block, since 10 < t <= 15\nat t = 15, it is unchanged even in a new block, since 10 < t <= 15\nat t = 16, it increases to (15, 20]. but then assume that the chain goes down\nand comes back up at t = 35, producing 1 block each second after that as well.\nat t = 35, the interval increases by 1 tick to (15, 20] (at the block end).\nat t = 36, it increases to (20, 25].\nat t = 37, it increases to (25, 30].\nat t = 38, it increases to (30, 35].\nat t = 39, it increases to (35, 40].\nat t = 40, it is unchanged even in a new block, since 35 < t <= 40.\nat t = 41, it increases to (40, 45]." + }, + "epoch_counting_started": { + "type": "boolean", + "description": "epoch_counting_started is true if the epoch counting has started for this identifier.\nit is independent of the epoch number or the time; it just indicates that at least\none epoch for this identifier has started counting." + }, + "current_epoch_start_height": { + "type": "string", + "format": "int64", + "description": "current_epoch_start_height is the block height at which the current epoch started,\nor the height at which the timer last \"ticked\"." + } + } + }, + "block_time": { + "type": "string", + "format": "date-time", + "description": "block_time is the block time of the query block ctx." + } + }, + "description": "QueryEpochInfoResponse is the response type for the Query/EpochInfo RPC method." + }, "exocore.epochs.v1.QueryEpochsInfoResponse": { "type": "object", "properties": { @@ -40287,6 +40599,63 @@ }, "description": "Params defines the parameters for the module." }, + "exocore.dogfood.v1.QueryAllValidatorsResponse": { + "type": "object", + "properties": { + "validators": { + "type": "array", + "items": { + "type": "object", + "properties": { + "address": { + "type": "string", + "format": "byte", + "description": "The address, as derived from the consensus key. It has no relation\nwith the operator's account address." + }, + "power": { + "type": "string", + "format": "int64", + "title": "Last known power" + }, + "pubkey": { + "type": "object", + "properties": { + "type_url": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. 
The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + }, + "value": { + "type": "string", + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." + } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\nExample 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\nExample 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := &pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + }, + "description": "ExocoreValidator is a validator that is part of the Exocore network. It is\nused to validate and sign blocks." 
+ }, + "description": "validators is the list of all validators." + }, + "pagination": { + "description": "pagination defines the pagination in the response.", + "type": "object", + "properties": { + "next_key": { + "type": "string", + "format": "byte", + "description": "next_key is the key to be passed to PageRequest.key to\nquery the next page most efficiently. It will be empty if\nthere are no more results." + }, + "total": { + "type": "string", + "format": "uint64", + "title": "total is total number of results available if PageRequest.count_total\nwas set, its value is undefined otherwise" + } + } + } + }, + "description": "QueryAllValidatorsResponse is response type for the Query/AllValidators RPC method." + }, "exocore.dogfood.v1.QueryOperatorOptOutFinishEpochResponse": { "type": "object", "properties": { @@ -40351,6 +40720,43 @@ }, "description": "QueryUndelegationMaturityEpochResponse is response type for the\nQuery/UndelegationMaturityEpoch RPC method." }, + "exocore.dogfood.v1.QueryValidatorResponse": { + "type": "object", + "properties": { + "validator": { + "description": "validator is the validator being queried.", + "type": "object", + "properties": { + "address": { + "type": "string", + "format": "byte", + "description": "The address, as derived from the consensus key. It has no relation\nwith the operator's account address." + }, + "power": { + "type": "string", + "format": "int64", + "title": "Last known power" + }, + "pubkey": { + "type": "object", + "properties": { + "type_url": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + }, + "value": { + "type": "string", + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." 
+ } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\nExample 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\nExample 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := &pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } + } + }, + "description": "QueryValidatorResponse is response type for the Query/Validator RPC method." 
+ }, "exocore.dogfood.v1.UndelegationRecordKeys": { "type": "object", "properties": { @@ -41278,11 +41684,6 @@ "type": "string", "title": "oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior" }, - "slash_fraction_miss": { - "type": "string", - "format": "byte", - "title": "slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price" - }, "slash_fraction_malicious": { "type": "string", "format": "byte", @@ -41776,11 +42177,6 @@ "type": "string", "title": "oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior" }, - "slash_fraction_miss": { - "type": "string", - "format": "byte", - "title": "slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price" - }, "slash_fraction_malicious": { "type": "string", "format": "byte", @@ -42263,11 +42659,6 @@ "type": "string", "title": "oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior" }, - "slash_fraction_miss": { - "type": "string", - "format": "byte", - "title": "slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price" - }, "slash_fraction_malicious": { "type": "string", "format": "byte", @@ -42538,11 +42929,6 @@ "type": "string", "title": "oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior" }, - "slash_fraction_miss": { - "type": "string", - "format": "byte", - "title": "slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price" - }, "slash_fraction_malicious": { "type": "string", "format": "byte", @@ -42558,6 +42944,11 @@ "exocore.oracle.v1.QueryStakerInfoResponse": { "type": "object", "properties": { + "version": { + "type": "string", + "format": "int64", + "title": "version of the staker validator list changes" + }, "staker_info": { "title": "all staker infos under the specified asset", "type": "object", @@ -42628,6 +43019,11 @@ "exocore.oracle.v1.QueryStakerInfosResponse": { "type": "object", "properties": { + "version": { + "type": "string", + "format": "int64", + "title": "version of the staker validator list changes" + }, "staker_infos": { "type": "array", "items": { @@ -42718,6 +43114,11 @@ "exocore.oracle.v1.QueryStakerListResponse": { "type": "object", "properties": { + "version": { + "type": "string", + "format": "int64", + "title": "version of the staker validator list changes" + }, "staker_list": { "title": "staker list including all stakers of request asset", "type": "object", @@ -43077,11 +43478,6 @@ "type": "string", "title": "oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior" }, - "slash_fraction_miss": { - "type": "string", - "format": "byte", - "title": "slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price" - }, "slash_fraction_malicious": { "type": "string", "format": "byte", @@ -43149,11 +43545,6 @@ "type": "string", "title": "oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior" }, - "slash_fraction_miss": { - "type": "string", - "format": "byte", - "title": "slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price" - }, "slash_fraction_malicious": { "type": "string", "format": "byte", @@ -44706,6 +45097,11 @@ "type": "string", "format": "int64", 
"title": "undelegation_filter_height records the height before which undelegations are not slashed" + }, + "historical_voting_power": { + "type": "string", + "format": "int64", + "description": "the historical voting power at the time of the slash event." } } } @@ -44806,6 +45202,11 @@ "type": "string", "format": "int64", "title": "undelegation_filter_height records the height before which undelegations are not slashed" + }, + "historical_voting_power": { + "type": "string", + "format": "int64", + "description": "the historical voting power at the time of the slash event." } } } @@ -45224,6 +45625,11 @@ "type": "string", "format": "int64", "title": "undelegation_filter_height records the height before which undelegations are not slashed" + }, + "historical_voting_power": { + "type": "string", + "format": "int64", + "description": "the historical voting power at the time of the slash event." } } } @@ -45632,6 +46038,11 @@ "type": "string", "format": "int64", "title": "undelegation_filter_height records the height before which undelegations are not slashed" + }, + "historical_voting_power": { + "type": "string", + "format": "int64", + "description": "the historical voting power at the time of the slash event." } }, "title": "SlashExecutionInfo is the actual execution state for a slash event" diff --git a/cmd/exocored/testnet.go b/cmd/exocored/testnet.go index cdcfdce8a..759856601 100644 --- a/cmd/exocored/testnet.go +++ b/cmd/exocored/testnet.go @@ -421,6 +421,7 @@ func getTestExocoreGenesis( OperatorInfo: operatortypes.OperatorInfo{ EarningsAddr: operator.String(), OperatorMetaInfo: "operator1", + ApproveAddr: operator.String(), Commission: stakingtypes.NewCommission( sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec(), ), diff --git a/local_node.sh b/local_node.sh index 3513323c0..6ec055fba 100755 --- a/local_node.sh +++ b/local_node.sh @@ -135,6 +135,7 @@ if [[ $overwrite == "y" || $overwrite == "Y" ]]; then # x/operator jq '.app_state["operator"]["operators"][0]["operator_address"]="'"$LOCAL_ADDRESS_EXO"'"' "$GENESIS" >"$TMP_GENESIS" && mv "$TMP_GENESIS" "$GENESIS" jq '.app_state["operator"]["operators"][0]["operator_info"]["earnings_addr"]="'"$LOCAL_ADDRESS_EXO"'"' "$GENESIS" >"$TMP_GENESIS" && mv "$TMP_GENESIS" "$GENESIS" + jq '.app_state["operator"]["operators"][0]["operator_info"]["approve_addr"]="'"$LOCAL_ADDRESS_EXO"'"' "$GENESIS" >"$TMP_GENESIS" && mv "$TMP_GENESIS" "$GENESIS" jq '.app_state["operator"]["operators"][0]["operator_info"]["operator_meta_info"]="operator1"' "$GENESIS" >"$TMP_GENESIS" && mv "$TMP_GENESIS" "$GENESIS" jq '.app_state["operator"]["operators"][0]["operator_info"]["commission"]["commission_rates"]["rate"]="0.0"' "$GENESIS" >"$TMP_GENESIS" && mv "$TMP_GENESIS" "$GENESIS" jq '.app_state["operator"]["operators"][0]["operator_info"]["commission"]["commission_rates"]["max_rate"]="0.0"' "$GENESIS" >"$TMP_GENESIS" && mv "$TMP_GENESIS" "$GENESIS" diff --git a/precompiles/assets/assets_test.go b/precompiles/assets/assets_test.go index 797c6aeaa..54e788aa3 100644 --- a/precompiles/assets/assets_test.go +++ b/precompiles/assets/assets_test.go @@ -310,7 +310,7 @@ func (s *AssetsPrecompileSuite) TestRunWithdrawPrincipal() { AssetsAddress: assetAddress, OpAmount: depositAmount, } - err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) + _, err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) s.Require().NoError(err) } @@ -909,7 +909,7 @@ func (s *AssetsPrecompileSuite) TestGetStakerBalanceByToken() { WithdrawableAmount: sdkmath.NewInt(70), 
PendingUndelegationAmount: sdkmath.NewInt(30), } - err = s.App.AssetsKeeper.UpdateStakerAssetState(s.Ctx, stakerID, assetID, assetDelta) + _, err = s.App.AssetsKeeper.UpdateStakerAssetState(s.Ctx, stakerID, assetID, assetDelta) s.Require().NoError(err) input, err := s.precompile.Pack( diff --git a/precompiles/assets/tx.go b/precompiles/assets/tx.go index 0409c0399..81fdac1d7 100644 --- a/precompiles/assets/tx.go +++ b/precompiles/assets/tx.go @@ -49,7 +49,7 @@ func (p Precompile) DepositOrWithdraw( } // call assets keeper to perform the deposit or withdraw action - err = p.assetsKeeper.PerformDepositOrWithdraw(ctx, depositWithdrawParams) + finalDepositAmount, err := p.assetsKeeper.PerformDepositOrWithdraw(ctx, depositWithdrawParams) if err != nil { return nil, err } @@ -72,13 +72,8 @@ func (p Precompile) DepositOrWithdraw( } } - // get the latest asset state of staker to return. - stakerID, assetID := assetstypes.GetStakerIDAndAssetID(depositWithdrawParams.ClientChainLzID, depositWithdrawParams.StakerAddress, depositWithdrawParams.AssetsAddress) - info, err := p.assetsKeeper.GetStakerSpecifiedAssetInfo(ctx, stakerID, assetID) - if err != nil { - return nil, err - } - return method.Outputs.Pack(true, info.TotalDepositAmount.BigInt()) + // return the latest asset state of staker + return method.Outputs.Pack(true, finalDepositAmount.BigInt()) } func (p Precompile) RegisterOrUpdateClientChain( @@ -97,8 +92,7 @@ func (p Precompile) RegisterOrUpdateClientChain( if err != nil { return nil, err } - updated := p.assetsKeeper.ClientChainExists(ctx, clientChainInfo.LayerZeroChainID) - err = p.assetsKeeper.SetClientChainInfo(ctx, clientChainInfo) + updated, err := p.assetsKeeper.SetClientChainInfo(ctx, clientChainInfo) if err != nil { return nil, err } diff --git a/precompiles/avs/avs_test.go b/precompiles/avs/avs_test.go index f70ff1554..0887c5fd1 100644 --- a/precompiles/avs/avs_test.go +++ b/precompiles/avs/avs_test.go @@ -395,6 +395,7 @@ func (suite *AVSManagerPrecompileSuite) TestRegisterOperatorToAVS() { FromAddress: operatorAddress.String(), Info: &operatortypes.OperatorInfo{ EarningsAddr: operatorAddress.String(), + ApproveAddr: operatorAddress.String(), }, } _, err := suite.OperatorMsgServer.RegisterOperator(sdk.WrapSDKContext(suite.Ctx), registerReq) @@ -560,6 +561,7 @@ func (suite *AVSManagerPrecompileSuite) TestDeregisterOperatorFromAVS() { FromAddress: operatorAddress.String(), Info: &operatortypes.OperatorInfo{ EarningsAddr: operatorAddress.String(), + ApproveAddr: operatorAddress.String(), }, } _, err := suite.OperatorMsgServer.RegisterOperator(sdk.WrapSDKContext(suite.Ctx), registerReq) diff --git a/precompiles/avs/events.go b/precompiles/avs/events.go index e4c78d2e4..ec659198e 100644 --- a/precompiles/avs/events.go +++ b/precompiles/avs/events.go @@ -274,6 +274,7 @@ func (p Precompile) EmitTaskSubmittedByOperator(ctx sdk.Context, stateDB vm.Stat // Prepare the event data:sender,TaskResponse, BlsSignature, Phase arguments := abi.Arguments{event.Inputs[2], event.Inputs[3], event.Inputs[4], event.Inputs[5]} packed, err := arguments.Pack(common.Address(params.CallerAddress), params.TaskResponse, params.BlsSignature, uint8(params.Phase)) + if err != nil { return err } diff --git a/precompiles/avs/utils_test.go b/precompiles/avs/utils_test.go index ff890e8d9..0d618968f 100644 --- a/precompiles/avs/utils_test.go +++ b/precompiles/avs/utils_test.go @@ -61,7 +61,7 @@ func (suite *AVSManagerPrecompileSuite) prepareDeposit(assetAddress common.Addre OpAmount: suite.depositAmount, AssetsAddress: 
assetAddress[:], } - err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParam) + _, err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParam) suite.NoError(err) } diff --git a/precompiles/delegation/delegation_test.go b/precompiles/delegation/delegation_test.go index 11bba27e4..4238d1663 100644 --- a/precompiles/delegation/delegation_test.go +++ b/precompiles/delegation/delegation_test.go @@ -81,7 +81,7 @@ func (s *DelegationPrecompileSuite) TestRunDelegate() { AssetsAddress: usdtAddress, OpAmount: depositAmount, } - err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) + _, err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) s.Require().NoError(err) } registerOperator := func() { @@ -89,6 +89,7 @@ func (s *DelegationPrecompileSuite) TestRunDelegate() { FromAddress: opAccAddr, Info: &operatortypes.OperatorInfo{ EarningsAddr: opAccAddr, + ApproveAddr: opAccAddr, }, } _, err := s.OperatorMsgServer.RegisterOperator(s.Ctx, registerReq) @@ -301,7 +302,7 @@ func (s *DelegationPrecompileSuite) TestRunUnDelegate() { AssetsAddress: usdtAddress, OpAmount: depositAmount, } - err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) + _, err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) s.Require().NoError(err) } @@ -325,6 +326,7 @@ func (s *DelegationPrecompileSuite) TestRunUnDelegate() { FromAddress: operatorAddr, Info: &operatortypes.OperatorInfo{ EarningsAddr: operatorAddr, + ApproveAddr: operatorAddr, }, } _, err := s.OperatorMsgServer.RegisterOperator(s.Ctx, registerReq) diff --git a/precompiles/delegation/tx.go b/precompiles/delegation/tx.go index 0ca524589..e98bc0393 100644 --- a/precompiles/delegation/tx.go +++ b/precompiles/delegation/tx.go @@ -52,7 +52,6 @@ func (p Precompile) Delegate( if err != nil { return nil, err } - err = p.delegationKeeper.DelegateTo(ctx, delegationParams) if err != nil { return nil, err diff --git a/precompiles/reward/reward_test.go b/precompiles/reward/reward_test.go index 1ecefcbc6..840dd636c 100644 --- a/precompiles/reward/reward_test.go +++ b/precompiles/reward/reward_test.go @@ -66,7 +66,7 @@ func (s *RewardPrecompileTestSuite) TestRunRewardThroughClientChain() { AssetsAddress: usdtAddress, OpAmount: depositAmount, } - err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) + _, err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) s.Require().NoError(err) } diff --git a/precompiles/slash/slash_test.go b/precompiles/slash/slash_test.go index fb2fb3e17..27e7491c6 100644 --- a/precompiles/slash/slash_test.go +++ b/precompiles/slash/slash_test.go @@ -66,7 +66,7 @@ func (s *SlashPrecompileTestSuite) TestRunSlash() { AssetsAddress: usdtAddress, OpAmount: depositAmount, } - err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) + _, err := s.App.AssetsKeeper.PerformDepositOrWithdraw(s.Ctx, params) s.Require().NoError(err) } diff --git a/proto/exocore/assets/v1/query.proto b/proto/exocore/assets/v1/query.proto index 449c507db..0e05cdc62 100644 --- a/proto/exocore/assets/v1/query.proto +++ b/proto/exocore/assets/v1/query.proto @@ -35,7 +35,7 @@ message QueryAllClientChainInfoResponse { // QueryStakingAssetInfo is the query for getting the staking asset info. message QueryStakingAssetInfo { // asset_id is the asset for which the query is made. - string asset_id = 1 ; + string asset_id = 1; } // QueryAllStakingAssetsInfo is the query for getting all staking assets info. 
@@ -51,7 +51,7 @@ message QueryAllStakingAssetsInfoResponse { // QueryStakerAssetInfo is the query for getting the staker asset info. message QueryStakerAssetInfo { // stake_id is the staker id for which the query is made. - string staker_id = 1 ; + string staker_id = 1; } // QueryAssetInfoResponse is the response for the staker asset info. @@ -63,9 +63,9 @@ message QueryAssetInfoResponse { // QuerySpecifiedAssetAmountReq is the query for getting the staker specified asset amount. message QuerySpecifiedAssetAmountReq { // staker_id is the staker id for which the query is made. - string staker_id = 1 ; + string staker_id = 1; // asset_id is the asset for which the query is made. - string asset_id = 2 ; + string asset_id = 2; } // QueryOperatorAssetInfos is the query for getting the operator asset info. diff --git a/proto/exocore/delegation/v1/query.proto b/proto/exocore/delegation/v1/query.proto index 333175e95..bac70f0d8 100644 --- a/proto/exocore/delegation/v1/query.proto +++ b/proto/exocore/delegation/v1/query.proto @@ -221,4 +221,5 @@ service Query { option (cosmos.query.v1.module_query_safe) = true; option (google.api.http).get = "/exocore/delegation/v1/delegated_stakers/{operator}/{asset_id}"; } + } diff --git a/proto/exocore/delegation/v1/tx.proto b/proto/exocore/delegation/v1/tx.proto index d34053526..e8f77ea9c 100644 --- a/proto/exocore/delegation/v1/tx.proto +++ b/proto/exocore/delegation/v1/tx.proto @@ -81,7 +81,7 @@ message UndelegationRecord { // It will be included in the key when storing the undelegation. uint64 undelegation_id = 8; // amount is the amount of the asset to be undelegated. - string amount = 9[ + string amount = 9 [ (cosmos_proto.scalar) = "cosmos.Int", (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", (gogoproto.nullable) = false diff --git a/proto/exocore/dogfood/v1/query.proto b/proto/exocore/dogfood/v1/query.proto index 3f2a56eb6..23c52f777 100644 --- a/proto/exocore/dogfood/v1/query.proto +++ b/proto/exocore/dogfood/v1/query.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package exocore.dogfood.v1; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "cosmos/query/v1/query.proto"; import "exocore/dogfood/v1/dogfood.proto"; import "exocore/dogfood/v1/params.proto"; import "gogoproto/gogo.proto"; @@ -13,33 +15,47 @@ option go_package = "github.com/ExocoreNetwork/exocore/x/dogfood/types"; service Query { // Params queries the parameters of the module. rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (cosmos.query.v1.module_query_safe) = true; option (google.api.http).get = "/exocore/dogfood/v1/params"; } // OptOutsToFinish queries the operators whose opt-outs will finish at the given epoch. rpc OptOutsToFinish(QueryOptOutsToFinishRequest) returns (AccountAddresses) { + option (cosmos.query.v1.module_query_safe) = true; option (google.api.http).get = "/exocore/dogfood/v1/opt_outs_to_finish/{epoch}"; } // OperatorOptOutFinishEpoch queries the epoch when the operator's opt-out will finish. rpc OperatorOptOutFinishEpoch(QueryOperatorOptOutFinishEpochRequest) returns (QueryOperatorOptOutFinishEpochResponse) { + option (cosmos.query.v1.module_query_safe) = true; option (google.api.http).get = "/exocore/dogfood/v1/operator_opt_out_finish_epoch/{operator_acc_addr}"; } // UndelegationsToMature queries the undelegations that will mature at the given epoch. 
rpc UndelegationsToMature(QueryUndelegationsToMatureRequest) returns (UndelegationRecordKeys) { + option (cosmos.query.v1.module_query_safe) = true; option (google.api.http).get = "/exocore/dogfood/v1/undelegations_to_mature/{epoch}"; } // UndelegationMaturityEpoch queries the epoch when the undelegation will mature. rpc UndelegationMaturityEpoch(QueryUndelegationMaturityEpochRequest) returns (QueryUndelegationMaturityEpochResponse) { + option (cosmos.query.v1.module_query_safe) = true; option (google.api.http).get = "/exocore/dogfood/v1/undelegation_maturity_epoch/{record_key}"; } - // QueryValidator queries the validator for the given consensus address. - rpc QueryValidator(QueryValidatorRequest) returns (ExocoreValidator) { + // Validator queries the validator for the given consensus address. This is the bare-minimum validator: + // public key, consensus address, power. No other params such as commission, jailed, etc. are included. + rpc Validator(QueryValidatorRequest) returns (QueryValidatorResponse) { + option (cosmos.query.v1.module_query_safe) = true; option (google.api.http).get = "/exocore/dogfood/v1/validator/{cons_addr}"; } + + // Validators queries all validators. This is the bare-minimum validator: public key, consensus address, power. + // No other params such as commission, jailed, etc. are included. + rpc Validators(QueryAllValidatorsRequest) returns (QueryAllValidatorsResponse) { + option (cosmos.query.v1.module_query_safe) = true; + option (google.api.http).get = "/exocore/dogfood/v1/validators"; + } } // QueryParamsRequest is request type for the Query/Params RPC method. @@ -92,9 +108,29 @@ message QueryUndelegationMaturityEpochResponse { int64 epoch = 1; } -// QueryValidatorRequest is request type for the Query/QueryValidator RPC method. +// QueryValidatorRequest is request type for the Query/Validator RPC method. message QueryValidatorRequest { // cons_addr is the consensus address of the validator being queried. From the perspective of // this module, the acc_addr is not relevant and is thus not stored. string cons_addr = 1; } + +// QueryValidatorResponse is response type for the Query/Validator RPC method. +message QueryValidatorResponse { + // validator is the validator being queried. + ExocoreValidator validator = 1; +} + +// QueryAllValidatorsRequest is request type for the Query/AllValidators RPC method. +message QueryAllValidatorsRequest { + // pagination defines an optional pagination for the request. + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryAllValidatorsResponse is response type for the Query/AllValidators RPC method. +message QueryAllValidatorsResponse { + // validators is the list of all validators. + repeated ExocoreValidator validators = 1 [(gogoproto.nullable) = false]; + // pagination defines the pagination in the response. + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} diff --git a/proto/exocore/epochs/v1/query.proto b/proto/exocore/epochs/v1/query.proto index e4c7c2357..c867c6717 100644 --- a/proto/exocore/epochs/v1/query.proto +++ b/proto/exocore/epochs/v1/query.proto @@ -20,6 +20,10 @@ service Query { rpc CurrentEpoch(QueryCurrentEpochRequest) returns (QueryCurrentEpochResponse) { option (google.api.http).get = "/exocore/epochs/v1/current_epoch/{identifier}"; } + // EpochInfo provides the epoch information for the specified identifier. 
+ rpc EpochInfo(QueryEpochInfoRequest) returns (QueryEpochInfoResponse) { + option (google.api.http).get = "/exocore/epochs/v1/epoch/{identifier}"; + } } // QueryEpochsInfoRequest is the request type for the Query/EpochInfos RPC @@ -43,6 +47,23 @@ message QueryEpochsInfoResponse { cosmos.base.query.v1beta1.PageResponse pagination = 3; } +// QueryEpochInfoRequest is the request type for the Query/EpochInfo RPC method. +message QueryEpochInfoRequest { + // identifier of the epoch for which the information is requested. + string identifier = 1; +} + +// QueryEpochInfoResponse is the response type for the Query/EpochInfo RPC method. +message QueryEpochInfoResponse { + // epoch is the EpochInfo for the requested epoch. + EpochInfo epoch = 1 [(gogoproto.nullable) = false]; + // block_time is the block time of the query block ctx. + google.protobuf.Timestamp block_time = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; +} + // QueryCurrentEpochRequest is the request type for the Query/EpochInfos RPC // method. message QueryCurrentEpochRequest { diff --git a/proto/exocore/operator/v1/tx.proto b/proto/exocore/operator/v1/tx.proto index a7f74cf4b..58d67d615 100644 --- a/proto/exocore/operator/v1/tx.proto +++ b/proto/exocore/operator/v1/tx.proto @@ -12,6 +12,9 @@ option go_package = "github.com/ExocoreNetwork/exocore/x/operator/types"; // DecValueField is a field that holds a value of sdk.LegacyDec type. message DecValueField { + // remove the default stringer for DecValueField and instead implement + // amount.String() to get the string value. + option (gogoproto.goproto_stringer) = false; // amount is the USD value of the asset, as an sdk.LegacyDec. string amount = 1 [ (cosmos_proto.scalar) = "cosmos.Dec", @@ -221,6 +224,8 @@ message SlashExecutionInfo { repeated SlashFromAssetsPool slash_assets_pool = 4 [(gogoproto.nullable) = false]; // undelegation_filter_height records the height before which undelegations are not slashed int64 undelegation_filter_height = 5; + // the historical voting power at the time of the slash event. 
+ int64 historical_voting_power = 6; } // OperatorSlashInfo is the slash info of operator diff --git a/proto/exocore/oracle/v1/genesis.proto b/proto/exocore/oracle/v1/genesis.proto index 421434dd3..cf2275625 100644 --- a/proto/exocore/oracle/v1/genesis.proto +++ b/proto/exocore/oracle/v1/genesis.proto @@ -55,6 +55,8 @@ message StakerInfosAssets { string asset_id = 1; // stakerInfos repeated StakerInfo staker_infos = 2; + // nst_version is the version of nst to track validator list changes + int64 nst_version = 3; } // stakerListAssets bond stakerList to their related assets id @@ -63,6 +65,8 @@ message StakerListAssets { string asset_id = 1; // stakerList StakerList staker_list = 2; + // nst_version is the version of nst to track validator list changes + int64 nst_version = 3; } // ValidatorMissedRounds record missed rounds indexes for a validator which consAddr corresponding to the address diff --git a/proto/exocore/oracle/v1/params.proto b/proto/exocore/oracle/v1/params.proto index a9f8c785b..bc8ad4faf 100644 --- a/proto/exocore/oracle/v1/params.proto +++ b/proto/exocore/oracle/v1/params.proto @@ -71,14 +71,8 @@ message SlashingParams { (amino.dont_omitempty) = true, (gogoproto.stdduration) = true ]; - // slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price - bytes slash_fraction_miss = 5 [ - (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", - (gogoproto.nullable) = false, - (amino.dont_omitempty) = true - ]; // slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior - bytes slash_fraction_malicious = 6 [ + bytes slash_fraction_malicious = 5 [ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", (gogoproto.nullable) = false, (amino.dont_omitempty) = true diff --git a/proto/exocore/oracle/v1/query.proto b/proto/exocore/oracle/v1/query.proto index 7d5a5a0da..3faebcce4 100644 --- a/proto/exocore/oracle/v1/query.proto +++ b/proto/exocore/oracle/v1/query.proto @@ -120,8 +120,10 @@ message QueryStakerListRequest { // QueryStakerListResponse is response type for Query/StakerList RPC method message QueryStakerListResponse { + // version of the staker validator list changes + int64 version = 1; // staker list including all stakers of request asset - StakerList staker_list = 1; + StakerList staker_list = 2; } // QueryStakerInfoRequest is request type for Query/StakerInfo RCP method @@ -134,8 +136,10 @@ message QueryStakerInfoRequest { // QueryStakerInfoResponse is response type for Query/StakerInfo RCP method message QueryStakerInfoResponse { + // version of the staker validator list changes + int64 version = 1; // all staker infos under the specified asset - StakerInfo staker_info = 1; + StakerInfo staker_info = 2; } // QueryStakerInfosRequest is request type for Query/StakerInfos RCP method @@ -148,10 +152,12 @@ message QueryStakerInfosRequest { // QueryStakerInfosResponse is response type for Query/StakerInfo RCP method message QueryStakerInfosResponse { + // version of the staker validator list changes + int64 version = 1; // all staker infos under the specified asset - repeated StakerInfo staker_infos = 1; + repeated StakerInfo staker_infos = 2; // pagination defines the pagination in the response. - cosmos.base.query.v1beta1.PageResponse pagination = 2; + cosmos.base.query.v1beta1.PageResponse pagination = 3; } // QueryParamsRequest is request type for the Query/Params RPC method. 
diff --git a/scripts/protoc-swagger-gen.sh b/scripts/protoc-swagger-gen.sh index 128fb042b..94c207ba1 100755 --- a/scripts/protoc-swagger-gen.sh +++ b/scripts/protoc-swagger-gen.sh @@ -35,6 +35,7 @@ cd .. # combine swagger files # uses nodejs package `swagger-combine`. # all the individual swagger files need to be configured in `config.json` for merging +echo "Combining swagger files" swagger-combine ./client/docs/config.json -o ./client/docs/swagger-ui/swagger.json -f json --continueOnConflictingPaths true --includeDefinitions true # clean swagger files diff --git a/tests/e2e/bank/bank_test.go b/tests/e2e/bank/bank_test.go deleted file mode 100644 index 773cf5681..000000000 --- a/tests/e2e/bank/bank_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package bank - -import ( - "testing" - - "github.com/ExocoreNetwork/exocore/testutil/network" - "github.com/stretchr/testify/suite" -) - -func TestE2ETestSuite(t *testing.T) { - cfg := network.DefaultConfig() - cfg.NumValidators = 1 - cfg.CleanupDir = true - cfg.EnableTMLogging = false - suite.Run(t, NewE2ETestSuite(cfg)) -} diff --git a/tests/e2e/bank/query.go b/tests/e2e/bank/query.go deleted file mode 100644 index a4a7f237b..000000000 --- a/tests/e2e/bank/query.go +++ /dev/null @@ -1,18 +0,0 @@ -package bank - -import ( - "github.com/ExocoreNetwork/exocore/tests/e2e" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// TestQueryBalance verifies that the native coin balance query returns the expected -// account balance for validators. It checks that: -// - The balance matches the network configuration -// - The returned coin denomination and amount are correct -// - The balance can be properly parsed into a native coin -func (s *E2ETestSuite) TestQueryBalance() { - res, err := e2e.QueryNativeCoinBalance(s.network.Validators[0].Address, s.network) - s.Require().NoError(err) - s.Require().Equal(sdk.NewCoin(s.network.Config.NativeDenom, s.network.Config.AccountTokens), *res.Balance) - s.Require().Equal(e2e.NewNativeCoin(s.network.Config.AccountTokens, s.network), *res.Balance) -} diff --git a/tests/e2e/bank/suite.go b/tests/e2e/bank/suite.go deleted file mode 100644 index 1566f26fd..000000000 --- a/tests/e2e/bank/suite.go +++ /dev/null @@ -1,26 +0,0 @@ -package bank - -import ( - "github.com/ExocoreNetwork/exocore/testutil/network" - "github.com/stretchr/testify/suite" -) - -type E2ETestSuite struct { - suite.Suite - - cfg network.Config - network *network.Network -} - -func NewE2ETestSuite(cfg network.Config) *E2ETestSuite { - return &E2ETestSuite{cfg: cfg} -} - -func (s *E2ETestSuite) SetupSuite() { - s.T().Log("setting up e2e test suite") - var err error - s.network, err = network.New(s.T(), s.T().TempDir(), s.cfg) - s.Require().NoError(err) - _, err = s.network.WaitForHeight(2) - s.Require().NoError(err) -} diff --git a/tests/e2e/bank/test.result b/tests/e2e/bank/test.result deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/e2e/bank/tx.go b/tests/e2e/bank/tx.go deleted file mode 100644 index e84020f09..000000000 --- a/tests/e2e/bank/tx.go +++ /dev/null @@ -1,50 +0,0 @@ -package bank - -import ( - sdkmath "cosmossdk.io/math" - "github.com/ExocoreNetwork/exocore/tests/e2e" - sdk "github.com/cosmos/cosmos-sdk/types" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" -) - -func (s *E2ETestSuite) TestSendCoin() { - kr := s.network.Validators[0].ClientCtx.Keyring - // generate a new account with ethsecp256k1 to receive/send native coins (hua) - toAddr, err := e2e.GenerateAccAddress(kr, "user1") - s.Require().NoError(err) - // 
generate sendCoin msg - fromAddr := s.network.Validators[0].Address - msg := banktypes.NewMsgSend(fromAddr, toAddr, sdk.NewCoins(sdk.NewCoin(s.network.Config.NativeDenom, sdkmath.NewInt(2000000)))) - - // send sendCoinMsg - err = s.network.SendTx([]sdk.Msg{msg}, s.network.Validators[0].ClientCtx.FromName, kr) - s.Require().NoError(err) - - // wait to next block for tx to be included - err = s.network.WaitForNextBlock() - s.Require().NoError(err) - - // check user1's balance - res, err := e2e.QueryNativeCoinBalance(toAddr, s.network) - s.Require().NoError(err) - s.Require().Equal(e2e.NewNativeCoin(sdkmath.NewInt(2000000), s.network), *res.Balance) - - toAddr2, err := e2e.GenerateAccAddress(kr, "user2") - s.Require().NoError(err) - - msg = banktypes.NewMsgSend(toAddr, toAddr2, sdk.NewCoins(sdk.NewCoin(s.network.Config.NativeDenom, sdkmath.NewInt(100)))) - // send sendCoinMsg - err = s.network.SendTx([]sdk.Msg{msg}, "user1", kr) - s.Require().NoError(err) - - // wait to next block for tx to be included - err = s.network.WaitForNextBlock() - s.Require().NoError(err) - err = s.network.WaitForNextBlock() - s.Require().NoError(err) - - // check user2's balance - res, err = e2e.QueryNativeCoinBalance(toAddr2, s.network) - s.Require().NoError(err) - s.Require().Equal(e2e.NewNativeCoin(sdkmath.NewInt(100), s.network), *res.Balance) -} diff --git a/tests/e2e/basesuite.go b/tests/e2e/basesuite.go deleted file mode 100644 index aa19de429..000000000 --- a/tests/e2e/basesuite.go +++ /dev/null @@ -1,26 +0,0 @@ -package e2e - -import ( - "github.com/ExocoreNetwork/exocore/testutil/network" - "github.com/stretchr/testify/suite" -) - -type BaseSuite struct { - suite.Suite - - Cfg network.Config - Network *network.Network -} - -func NewBaseSuite(cfg network.Config) *BaseSuite { - return &BaseSuite{Cfg: cfg} -} - -func (s *BaseSuite) SetupSuite() { - s.T().Log("setting up e2e test suite") - var err error - s.Network, err = network.New(s.T(), s.T().TempDir(), s.Cfg) - s.Require().NoError(err) - _, err = s.Network.WaitForHeight(2) - s.Require().NoError(err) -} diff --git a/tests/e2e/oracle/create_price.go b/tests/e2e/oracle/create_price.go index ffed6fdea..5b129444d 100644 --- a/tests/e2e/oracle/create_price.go +++ b/tests/e2e/oracle/create_price.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "os" "time" sdkmath "cosmossdk.io/math" @@ -18,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -45,23 +47,90 @@ func (s *E2ETestSuite) TestCreatePrice() { kr3 = s.network.Validators[3].ClientCtx.Keyring creator3 = sdk.AccAddress(s.network.Validators[3].PubKey.Address()) - // we combine all test cases into one big case to avoid reset the network multiple times - s.testRegisterTokenThroughPrecompile() - s.testCreatePriceNST() - s.testCreatePriceLST() - s.testSlashing() + // we combine all test cases into one big case to avoid reset the network multiple times, the order can't be changed + + option := os.Getenv("TEST_OPTION") + if option == "local" { + s.testRecoveryCases(10) + } else { + s.testRegisterTokenThroughPrecompile() + s.testCreatePriceNST() + s.testCreatePriceLST() + s.testSlashing() + s.testCreatePriceLSTAfterDelegationChangePower() + } +} + +func (s *E2ETestSuite) testCreatePriceLSTAfterDelegationChangePower() { + s.moveToAndCheck(80) + priceTest1R1 := price2.updateTimestamp() + priceTimeDetID1R1 := priceTest1R1.getPriceTimeDetID("9") + 
priceSource1R1 := oracletypes.PriceSource{ + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + &priceTimeDetID1R1, + }, + } + + // send create-price from validator-0 + msg0 := oracletypes.NewMsgCreatePrice(creator0.String(), 1, []*oracletypes.PriceSource{&priceSource1R1}, 80, 1) + err := s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconskey0", kr0) + s.Require().NoError(err) + + // send create-price from validator-1 + msg1 := oracletypes.NewMsgCreatePrice(creator1.String(), 1, []*oracletypes.PriceSource{&priceSource1R1}, 80, 1) + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconskey1", kr1) + s.Require().NoError(err) + + s.moveToAndCheck(82) + res, err := s.network.QueryOracle().LatestPrice(ctxWithHeight(81), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.NoError(err) + s.Require().Equal(res.Price.Price, price1.Price) + + s.moveToAndCheck(85) + clientChainID := uint32(101) + lzNonce := uint64(0) + assetAddr, _ := hexutil.Decode(network.ETHAssetAddress) + stakerAddr := []byte(s.network.Validators[0].Address) + operatorAddr := []byte(s.network.Validators[0].Address.String()) + opAmount := big.NewInt(90000000) + // delegate the deposited assets to the operator so that its voting power changes + err = s.network.SendPrecompileTx(network.DELEGATION, "delegate", clientChainID, lzNonce, assetAddr, stakerAddr, operatorAddr, opAmount) + s.Require().NoError(err) + + // wait for validator set update + s.moveToAndCheck(120) + + // send create-price from validator-0 + msg0 = oracletypes.NewMsgCreatePrice(creator0.String(), 1, []*oracletypes.PriceSource{&priceSource1R1}, 120, 1) + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconskey0", kr0) + s.Require().NoError(err) + + // send create-price from validator-1 + msg1 = oracletypes.NewMsgCreatePrice(creator1.String(), 1, []*oracletypes.PriceSource{&priceSource1R1}, 120, 1) + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconskey1", kr1) + s.Require().NoError(err) + + s.moveToAndCheck(122) + // query final price. query state of 121 on height 122 + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(121), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + + ret := priceTest1R1.getPriceTimeRound(12) + ret.Timestamp = res.Price.Timestamp + s.Require().Equal(ret, res.Price) } /* cases: - we need more than 2/3 power, so that at least 3 out of 4 validators power should be enough - 1. block_1_1: v1 sendPrice{p1}, [no round_1 price after block_1_1 committed], block_1_2:v2&v3 sendPrice{p1}, [got round_1 price{p1} after block_1_2 committed] - 2. block_2_1: v3 sendPrice{p2}, block_2_2: v1 sendPrice{p2}, [no round_2 price after block_2_2 committed], block_2_3:nothing, [got round_2 price{p1} equals to round_1 after block_2_3 committed] - 3. block_3_1: v1 sendPrice{p1}, block_3_2: v2&v3 sendPrice{p2}, block_3_3: v3 sendPrice{p2}, [got final price{p2} after block_3_3 committed] - 4. block_4_1: v1&v2&v3 sendPrice{p1}, [got round_4 price{p1} after block_4_1 committed]] +we need more than 2/3 power, so that at least 3 out of 4 validators power should be enough +1. block_1_1: v1 sendPrice{p1}, [no round_1 price after block_1_1 committed], block_1_2:v2&v3 sendPrice{p1}, [got round_1 price{p1} after block_1_2 committed] +2. block_2_1: v3 sendPrice{p2}, block_2_2: v1 sendPrice{p2}, [no round_2 price after block_2_2 committed], block_2_3:nothing, [got round_2 price{p1} equals to round_1 after block_2_3 committed] +3. 
block_3_1: v1 sendPrice{p1}, block_3_2: v2&v3 sendPrice{p2}, block_3_3: v3 sendPrice{p2}, [got final price{p2} after block_3_3 committed] +4. block_4_1: v1&v2&v3 sendPrice{p1}, [got round_4 price{p1} after block_4_1 committed]] - --- nonce: +--- nonce: */ func (s *E2ETestSuite) testCreatePriceLST() { priceTest1R1 := price1.updateTimestamp() @@ -81,11 +150,7 @@ func (s *E2ETestSuite) testCreatePriceLST() { err := s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconskey0", kr0) s.Require().NoError(err) - s.moveNAndCheck(1) - // query final price - _, err = s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) - errStatus, _ := status.FromError(err) - s.Require().Equal(codes.NotFound, errStatus.Code()) + s.moveToAndCheck(11) // send create-price from validator-1 msg1 := oracletypes.NewMsgCreatePrice(creator1.String(), 1, []*oracletypes.PriceSource{&priceSource1R1}, 10, 1) @@ -97,11 +162,7 @@ func (s *E2ETestSuite) testCreatePriceLST() { err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconskey2", kr2) s.Require().NoError(err) - s.moveNAndCheck(1) - // query final price - res, err := s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) - s.Require().NoError(err) - s.Require().Equal(priceTest1R1.getPriceTimeRound(1), res.Price) + s.moveToAndCheck(12) // TODO: there might be a small chance that the blockHeight grows to more than 13, try bigger price window(nonce>3) to be more confident // send create-price from validator3 to avoid being slashed for downtime @@ -109,6 +170,20 @@ func (s *E2ETestSuite) testCreatePriceLST() { err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3}, "valconskey3", kr3) s.Require().NoError(err) + // query final price. query state of 11 on height 12 + _, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(11), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + errStatus, _ := status.FromError(err) + s.Require().Equal(codes.NotFound, errStatus.Code()) + + s.moveToAndCheck(13) + // query final price. query state of 12 on height 13 + res, err := s.network.QueryOracle().LatestPrice(ctxWithHeight(12), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // NOTE: update timestamp manually to ignore + ret := priceTest1R1.getPriceTimeRound(1) + ret.Timestamp = res.Price.Timestamp + s.Require().Equal(ret, res.Price) + // case_2. 
slashing{miss_v3:1, window:2} [1.0] // timestamp need to be updated priceTest2R2 := price2.updateTimestamp() @@ -121,21 +196,22 @@ func (s *E2ETestSuite) testCreatePriceLST() { } msg0 = oracletypes.NewMsgCreatePrice(creator0.String(), 1, []*oracletypes.PriceSource{&priceSource2R2}, 20, 1) msg2 = oracletypes.NewMsgCreatePrice(creator2.String(), 1, []*oracletypes.PriceSource{&priceSource2R2}, 20, 1) - s.moveToAndCheck(20) // send price{p2} from validator-2 err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconskey2", kr2) s.Require().NoError(err) - s.moveNAndCheck(1) + s.moveToAndCheck(21) // send price{p2} from validator-0 err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconskey0", kr0) s.Require().NoError(err) - s.moveToAndCheck(23) - res, err = s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.moveToAndCheck(24) + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(23), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) s.Require().NoError(err) // price update fail, round 2 still have price{p1} - s.Require().Equal(priceTest1R1.getPriceTimeRound(2), res.Price) - + // NOTE: update timestamp manually to ignore + ret = priceTest1R1.getPriceTimeRound(2) + ret.Timestamp = res.Price.Timestamp + s.Require().Equal(ret, res.Price) // case_3. slashing_{miss_v3:2, window:3} [1.0.1] // update timestamp priceTest2R3 := price2.updateTimestamp() @@ -146,7 +222,6 @@ func (s *E2ETestSuite) testCreatePriceLST() { &priceTimeDetID2R3, }, } - msg0 = oracletypes.NewMsgCreatePrice(creator0.String(), 1, []*oracletypes.PriceSource{&priceSource2R3}, 30, 1) msg1 = oracletypes.NewMsgCreatePrice(creator1.String(), 1, []*oracletypes.PriceSource{&priceSource2R3}, 30, 1) msg2 = oracletypes.NewMsgCreatePrice(creator2.String(), 1, []*oracletypes.PriceSource{&priceSource2R3}, 30, 1) @@ -154,7 +229,7 @@ func (s *E2ETestSuite) testCreatePriceLST() { // send price{p2} from validator-0 err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconskey0", kr0) s.Require().NoError(err) - s.moveNAndCheck(1) + s.moveToAndCheck(31) // send price{p2} from validator-1 err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconskey1", kr1) s.Require().NoError(err) @@ -163,11 +238,14 @@ func (s *E2ETestSuite) testCreatePriceLST() { err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconskey2", kr2) s.Require().NoError(err) - s.moveNAndCheck(1) - res, err = s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.moveToAndCheck(33) + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(32), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) s.Require().NoError(err) // price updated, round 3 has price{p2} - s.Require().Equal(priceTest2R3.getPriceTimeRound(3), res.Price) + // NOTE: update timestamp manually to ignore + ret = priceTest2R3.getPriceTimeRound(3) + ret.Timestamp = res.Price.Timestamp + s.Require().Equal(ret, res.Price) // case_4. 
slashing_{miss_v3:2, window:4}.maxWindow=4 [1.0.1.0] // update timestamp @@ -182,15 +260,22 @@ func (s *E2ETestSuite) testCreatePriceLST() { s.Require().NoError(err) err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconskey2", kr2) s.Require().NoError(err) - s.moveNAndCheck(1) - res, err = s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) - s.Require().NoError(err) - // price updated, round 4 has price{p1} - s.Require().Equal(priceTest1R4.getPriceTimeRound(4), res.Price) + + s.moveToAndCheck(41) // send create-price from validator3 to avoid being slashed for downtime msg3 = oracletypes.NewMsgCreatePrice(creator3.String(), 1, []*oracletypes.PriceSource{&priceSource1R4}, 40, 1) err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3}, "valconskey3", kr3) s.Require().NoError(err) + + s.moveToAndCheck(42) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(41), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price updated, round 4 has price{p1} + // NOTE: update timestamp manually to ignore + ret = priceTest1R4.getPriceTimeRound(4) + ret.Timestamp = res.Price.Timestamp + s.Require().Equal(ret, res.Price) } func (s *E2ETestSuite) testCreatePriceNST() { @@ -207,28 +292,38 @@ func (s *E2ETestSuite) testCreatePriceNST() { // deposit 32 NSTETH to staker from beaconchain_validatro_1 err = s.network.SendPrecompileTx(network.ASSETS, "depositNST", clientChainID, validatorPubkey, stakerAddr, opAmount) s.Require().NoError(err) - s.moveNAndCheck(1) ctx := context.Background() - // check stakerAssetInfo + + // slashing_{miss_v3:1, window:1} [1] + s.moveToAndCheck(7) + _, ps := priceNST1.generateRealTimeStructs("100_1", 1) + msg0 := oracletypes.NewMsgCreatePrice(creator0.String(), 2, []*oracletypes.PriceSource{&ps}, 7, 1) + msg1 := oracletypes.NewMsgCreatePrice(creator1.String(), 2, []*oracletypes.PriceSource{&ps}, 7, 1) + msg2 := oracletypes.NewMsgCreatePrice(creator2.String(), 2, []*oracletypes.PriceSource{&ps}, 7, 1) + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconskey0", kr0) + s.Require().NoError(err) + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconskey1", kr1) + s.Require().NoError(err) + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconskey2", kr2) + s.Require().NoError(err) + + // on height 7, the state from 6 is committed and confirmed res, err := s.network.QueryAssets().QueStakerSpecifiedAssetAmount(ctx, &assetstypes.QuerySpecifiedAssetAmountReq{StakerId: stakerID, AssetId: network.NativeAssetID}) + resStakerList, err2 := s.network.QueryOracle().StakerList(ctx, &oracletypes.QueryStakerListRequest{AssetId: network.NativeAssetID}) + resStakerInfo, err3 := s.network.QueryOracle().StakerInfo(ctx, &oracletypes.QueryStakerInfoRequest{AssetId: network.NativeAssetID, StakerAddr: stakerAddrStr}) s.Require().NoError(err) s.Require().Equal(assetstypes.StakerAssetInfo{ TotalDepositAmount: sdkmath.NewInt(32), WithdrawableAmount: sdkmath.NewInt(32), PendingUndelegationAmount: sdkmath.ZeroInt(), }, *res) - // check stakerList from oracle had been updated successfully - resStakerList, err := s.network.QueryOracle().StakerList(ctx, &oracletypes.QueryStakerListRequest{AssetId: network.NativeAssetID}) - s.Require().NoError(err) + s.Require().NoError(err2) s.Require().Equal(oracletypes.StakerList{ StakerAddrs: []string{ stakerAddrStr, }, }, *resStakerList.StakerList) - - // check stakerInfo from oracle had been updated successfully - resStakerInfo, 
err := s.network.QueryOracle().StakerInfo(ctx, &oracletypes.QueryStakerInfoRequest{AssetId: network.NativeAssetID, StakerAddr: stakerAddrStr}) - s.Require().NoError(err) + s.Require().NoError(err3) s.Require().Equal(oracletypes.StakerInfo{ StakerAddr: stakerAddrStr, StakerIndex: 0, @@ -246,31 +341,21 @@ func (s *E2ETestSuite) testCreatePriceNST() { }, }, *resStakerInfo.StakerInfo) - // slashing_{miss_v3:1, window:1} [1] - s.moveToAndCheck(7) - _, ps := priceNST1.generateRealTimeStructs("100", 1) - msg0 := oracletypes.NewMsgCreatePrice(creator0.String(), 2, []*oracletypes.PriceSource{&ps}, 7, 1) - msg1 := oracletypes.NewMsgCreatePrice(creator1.String(), 2, []*oracletypes.PriceSource{&ps}, 7, 1) - msg2 := oracletypes.NewMsgCreatePrice(creator2.String(), 2, []*oracletypes.PriceSource{&ps}, 7, 1) - err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconskey0", kr0) - s.Require().NoError(err) - err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconskey1", kr1) - s.Require().NoError(err) - err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconskey2", kr2) - s.Require().NoError(err) - - s.moveNAndCheck(1) + // new block - 9, state of 8 is committed + s.moveToAndCheck(9) resStakerInfo, err = s.network.QueryOracle().StakerInfo(ctx, &oracletypes.QueryStakerInfoRequest{AssetId: network.NativeAssetID, StakerAddr: stakerAddrStr}) s.Require().NoError(err) s.Require().Equal(2, len(resStakerInfo.StakerInfo.BalanceList)) s.Require().Equal([]*oracletypes.BalanceInfo{ { Block: 6, + Index: 0, Balance: 32, Change: oracletypes.Action_ACTION_DEPOSIT, }, { RoundID: 1, + Index: 1, Block: 8, Balance: 28, Change: oracletypes.Action_ACTION_SLASH_REFUND, @@ -295,11 +380,15 @@ func (s *E2ETestSuite) testSlashing() { s.Require().NoError(err) err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconskey2", kr2) s.Require().NoError(err) - s.moveNAndCheck(1) - res, err := s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.moveToAndCheck(52) + // query state of 51 on height 52 + res, err := s.network.QueryOracle().LatestPrice(ctxWithHeight(51), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) s.Require().NoError(err) // price updated, round 4 has price{p1} - s.Require().Equal(priceTest1R5.getPriceTimeRound(5), res.Price) + // NOTE: update timestamp manually to ignore + ret := priceTest1R5.getPriceTimeRound(5) + ret.Timestamp = res.Price.Timestamp + s.Require().Equal(ret, res.Price) s.moveToAndCheck(60) // slashing_{miss_v3:3, window:5} [0.1.0.1.1] -> {miss_v3:2, window:4} [1.0.1.1] _, priceSource1R6 := price1.generateRealTimeStructs("14", 1) @@ -312,8 +401,9 @@ func (s *E2ETestSuite) testSlashing() { s.Require().NoError(err) err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconskey2", kr2) s.Require().NoError(err) - s.moveToAndCheck(63) - resSigningInfo, err := s.network.QuerySlashing().SigningInfo(context.Background(), &slashingtypes.QuerySigningInfoRequest{ConsAddress: sdk.ConsAddress(s.network.Validators[3].PubKey.Address()).String()}) + s.moveToAndCheck(64) + // query state of 63 on height 64 + resSigningInfo, err := s.network.QuerySlashing().SigningInfo(ctxWithHeight(63), &slashingtypes.QuerySigningInfoRequest{ConsAddress: sdk.ConsAddress(s.network.Validators[3].PubKey.Address()).String()}) s.Require().NoError(err) // validator3 is jailed s.Require().True(resSigningInfo.ValSigningInfo.JailedUntil.After(time.Now())) @@ -323,7 +413,8 @@ func (s *E2ETestSuite) testSlashing() { s.Require().NoError(err) 
s.Require().True(resOperator.Jailed) // wait for validator3 to pass jail duration - time.Sleep(35 * time.Second) + // timeout commit is set to 2 seconds, 10 blocks about 20 seconds + s.moveToAndCheck(75) msgUnjail := slashingtypes.NewMsgUnjail(s.network.Validators[3].ValAddress) // unjail validator3 err = s.network.SendTx([]sdk.Msg{msgUnjail}, "node3", kr3) @@ -331,7 +422,6 @@ func (s *E2ETestSuite) testSlashing() { s.moveNAndCheck(2) resOperator, err = s.network.QueryOperator().QueryOptInfo(context.Background(), &operatortypes.QueryOptInfoRequest{OperatorAVSAddress: &operatortypes.OperatorAVSAddress{OperatorAddr: s.network.Validators[3].Address.String(), AvsAddress: avsAddr}}) s.Require().NoError(err) - fmt.Println("debug----->jailed:", resOperator.Jailed) s.Require().False(resOperator.Jailed) } @@ -349,15 +439,15 @@ func (s *E2ETestSuite) testRegisterTokenThroughPrecompile() { err := s.network.SendPrecompileTx(network.ASSETS, "registerToken", clientChainID, token, decimal, name, metaData, oracleInfo) s.Require().NoError(err) - s.moveNAndCheck(1) + s.moveToAndCheck(4) // registerToken will automaticlly register that token into oracle module - res, err := s.network.QueryOracle().Params(context.Background(), &oracletypes.QueryParamsRequest{}) + res, err := s.network.QueryOracle().Params(ctxWithHeight(3), &oracletypes.QueryParamsRequest{}) s.Require().NoError(err) - s.Require().Equal(name, res.Params.Tokens[3].Name) + s.Require().Equal(name, res.Params.Tokens[len(res.Params.Tokens)-1].Name) } func (s *E2ETestSuite) moveToAndCheck(height int64) { - _, err := s.network.WaitForStateHeightWithTimeout(height, 30*time.Second) + _, err := s.network.WaitForStateHeightWithTimeout(height, 120*time.Second) s.Require().NoError(err) } @@ -367,3 +457,8 @@ func (s *E2ETestSuite) moveNAndCheck(n int64) { s.Require().NoError(err) } } + +func ctxWithHeight(height int64) context.Context { + md := metadata.Pairs("x-cosmos-block-height", fmt.Sprintf("%d", height)) + return metadata.NewOutgoingContext(context.Background(), md) +} diff --git a/tests/e2e/oracle/data.go b/tests/e2e/oracle/data.go index 32bdbf871..55bb1b71e 100644 --- a/tests/e2e/oracle/data.go +++ b/tests/e2e/oracle/data.go @@ -1,12 +1,10 @@ package oracle import ( - "time" - oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" ) -const layout = "2006-01-02 15:04:05" +var now = "2025-01-01 00:00:00" type priceTime struct { Price string @@ -33,8 +31,7 @@ func (p priceTime) getPriceTimeRound(roundID uint64) oracletypes.PriceTimeRound } func (p priceTime) updateTimestamp() priceTime { - t := time.Now().UTC().Format(layout) - p.Timestamp = t + p.Timestamp = now return p } @@ -55,22 +52,100 @@ func generateNSTPriceTime(sc [][]int) priceTime { return priceTime{ Price: string(rawBytes), Decimal: 0, - Timestamp: time.Now().UTC().Format(layout), + Timestamp: now, } } var ( price1 = priceTime{ - Price: "199999", - Decimal: 18, - Timestamp: time.Now().UTC().Format(layout), + Price: "1900000000", + Decimal: 8, + Timestamp: now, } price2 = priceTime{ - Price: "299999", - Decimal: 18, - Timestamp: time.Now().UTC().Format(layout), + Price: "290000000", + Decimal: 8, + Timestamp: now, } stakerChanges1 = [][]int{{0, -4}} priceNST1 = generateNSTPriceTime(stakerChanges1) + + // 1. detID:1, price: 123 + // 2. detID:1, price: 129 + // 3. 
detID:2, price: 127 + priceRecovery1 = oracletypes.PriceSource{ + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "12300000000", + Decimal: 8, + DetID: "1", + Timestamp: now, + }, + }, + } + priceRecovery1_2 = oracletypes.PriceSource{ + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "12300000000", + Decimal: 8, + DetID: "1", + Timestamp: now, + }, + { + Price: "12700000000", + Decimal: 8, + DetID: "2", + Timestamp: now, + }, + }, + } + + priceRecovery1_3 = oracletypes.PriceSource{ + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "12300000000", + Decimal: 8, + DetID: "1", + Timestamp: now, + }, + { + Price: "12700000000", + Decimal: 8, + DetID: "2", + Timestamp: now, + }, + { + Price: "12900000000", + Decimal: 8, + DetID: "3", + Timestamp: now, + }, + }, + } + priceRecovery2 = oracletypes.PriceSource{ + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "12700000000", + Decimal: 8, + DetID: "2", + Timestamp: now, + }, + }, + } + priceRecovery3 = oracletypes.PriceSource{ + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "12900000000", + Decimal: 8, + DetID: "3", + Timestamp: now, + }, + }, + } ) diff --git a/tests/e2e/oracle/helper_nstconvert.go b/tests/e2e/oracle/helper_nstconvert.go index 57dcb6910..3cae1a4ff 100644 --- a/tests/e2e/oracle/helper_nstconvert.go +++ b/tests/e2e/oracle/helper_nstconvert.go @@ -2,6 +2,7 @@ package oracle import ( "encoding/binary" + "math" "strings" "github.com/imroc/biu" ) @@ -10,8 +11,7 @@ import ( func convertBalanceChangeToBytes(stakerChanges [][]int) []byte { if len(stakerChanges) == 0 { // length equals to 0 means that alls takers have efb of 32 with 0 changes - ret := make([]byte, 32) - return ret + return make([]byte, 32) } str := "" index := 0 @@ -23,6 +23,10 @@ func convertBalanceChangeToBytes(stakerChanges [][]int) []byte { // change amount -> bytes change := stakerChange[1] + if (change > 0 && change > math.MaxUint16) || + (change < 0 && (-1*change) > math.MaxUint16) { + return make([]byte, 32) + } var changeBytes []byte symbol := 1 if change < 0 { @@ -47,6 +51,7 @@ func convertBalanceChangeToBytes(stakerChanges [][]int) []byte { } else { // 2 byte changeBytes = make([]byte, 2) + // #nosec G115 // change has been checked to make sure no overflow binary.BigEndian.PutUint16(changeBytes, uint16(change)) moveLength := 16 - bits changeBytes[0] <<= moveLength diff --git a/tests/e2e/oracle/recovery.go b/tests/e2e/oracle/recovery.go new file mode 100644 index 000000000..dad07d799 --- /dev/null +++ b/tests/e2e/oracle/recovery.go @@ -0,0 +1,530 @@ +package oracle + +import ( + "math/big" + + "github.com/ExocoreNetwork/exocore/testutil/network" + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +// the test cases run with the 'devmode' flag; we try to enumerate all cases to check that the recovery logic works fine in each scenario +// this could take some time since we will run for many tokenfeeder rounds to cover many cases +// +// comments explain: +// +// 1{1} means the first block includes one valid quote +// 1{1-} means the first block includes one invalid quote which differs from the expected final price (detID, price) +func (s *E2ETestSuite) testRecoveryCases(start int64) { + // 1.successfully aggregated, + // 1.1 all prices provided are the same(detID, price, decimal) + // 1{3}, 2{1}, 3 + s.moveToAndCheck(start) + // #nosec G115 -- block height is positive 
+ startUint := uint64(start) + msg0 := oracletypes.NewMsgCreatePrice(creator0.String(), 1, []*oracletypes.PriceSource{&priceRecovery1}, startUint, 1) + msg1 := oracletypes.NewMsgCreatePrice(creator1.String(), 1, []*oracletypes.PriceSource{&priceRecovery1}, startUint, 1) + msg2 := oracletypes.NewMsgCreatePrice(creator2.String(), 1, []*oracletypes.PriceSource{&priceRecovery1}, startUint, 1) + msg3 := oracletypes.NewMsgCreatePrice(creator3.String(), 1, []*oracletypes.PriceSource{&priceRecovery1}, startUint, 1) + + err := s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconsKey0", kr0) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconsKey1", kr1) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconsKey2", kr2) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3}, "valconsKey3", kr3) + s.Require().NoError(err) + + s.moveToAndCheck(start + 2) + res, err := s.network.QueryOracle().LatestPrice(ctxWithHeight(start+1), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.NoError(err) + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + // 1{1}, 2{3}, 3 + // init_start + 10 + start += 10 + startUint = uint64(start) + msg0_1 := oracletypes.NewMsgCreatePrice(creator0.String(), 1, []*oracletypes.PriceSource{&priceRecovery2}, startUint, 1) + msg1_1 := oracletypes.NewMsgCreatePrice(creator1.String(), 1, []*oracletypes.PriceSource{&priceRecovery2}, startUint, 1) + msg2_1 := oracletypes.NewMsgCreatePrice(creator2.String(), 1, []*oracletypes.PriceSource{&priceRecovery2}, startUint, 1) + msg3_1 := oracletypes.NewMsgCreatePrice(creator3.String(), 1, []*oracletypes.PriceSource{&priceRecovery2}, startUint, 1) + + s.moveToAndCheck(start) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0_1}, "valconsKey0", kr0) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1_1}, "valconsKey1", kr1) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2_1}, "valconsKey2", kr2) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3_1}, "valconsKey3", kr3) + s.Require().NoError(err) + + s.moveToAndCheck(start + 3) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+1), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.NoError(err) + // price updated from priceRecovery1 to priceRecovery2 + s.Require().Equal(res.Price.Price, priceRecovery2.Prices[0].Price) + + // 1{1}, 2, 3{3} + // init_start + 20 + start += 10 + startUint = uint64(start) + + s.moveToAndCheck(start) + msg0.BasedBlock = startUint + msg1.BasedBlock = startUint + msg2.BasedBlock = startUint + msg3.BasedBlock = startUint + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconsKey0", kr0) + s.Require().NoError(err) + + s.moveToAndCheck(start + 2) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconsKey1", kr1) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconsKey2", kr2) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3}, "valconsKey3", kr3) + s.Require().NoError(err) + + 
s.moveToAndCheck(start + 4) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery2.Prices[0].Price) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+3), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.NoError(err) + // price updated from priceRecovery2 to priceRecovery1 + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + // 1{1}, 2{1}, 3{1} + // init_start + 30 + start += 10 + startUint = uint64(start) + s.moveToAndCheck(start) + msg1_1.BasedBlock = startUint + msg2_1.BasedBlock = startUint + msg3_1.BasedBlock = startUint + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1_1}, "valconsKey1", kr1) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2_1}, "valconsKey2", kr2) + s.Require().NoError(err) + + s.moveToAndCheck(start + 2) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3_1}, "valconsKey3", kr3) + s.Require().NoError(err) + + s.moveToAndCheck(start + 4) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+3), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price updated from priceRecovery1 to priceRecovery2 + s.Require().Equal(res.Price.Price, priceRecovery2.Prices[0].Price) + + // 1{1}, 2{1}, 3{2} + // init_start+40 + start += 10 + startUint = uint64(start) + s.moveToAndCheck(start) + msg1.BasedBlock = startUint + msg2.BasedBlock = startUint + msg3.BasedBlock = startUint + msg0.BasedBlock = startUint + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconsKey1", kr1) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconsKey2", kr2) + s.Require().NoError(err) + + s.moveToAndCheck(start + 2) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3}, "valconsKey3", kr3) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconsKey0", kr0) + s.Require().NoError(err) + + s.moveToAndCheck(start + 4) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery2.Prices[0].Price) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+3), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price updated from priceRecovery2 to priceRecovery1 + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + // 1{1}, 2{2}, 3{1} + // init_start+50 + start += 10 + startUint = uint64(start) + s.moveToAndCheck(start) + msg1_1.BasedBlock = startUint + msg2_1.BasedBlock = startUint + msg3_1.BasedBlock = startUint + msg0_1.BasedBlock = startUint + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1_1}, "valconsKey1", kr1) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2_1}, "valconsKey2", kr2) + s.Require().NoError(err) + + s.moveToAndCheck(start + 2) + + err = 
s.network.SendTxOracleCreateprice([]sdk.Msg{msg3_1}, "valconsKey3", kr3) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0_1}, "valconsKey0", kr0) + s.Require().NoError(err) + + s.moveToAndCheck(start + 4) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+3), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price updated from priceRecovery1 to priceRecovery2 + s.Require().Equal(res.Price.Price, priceRecovery2.Prices[0].Price) + + // 1{1}, 2{2}, 3{1} + // init_start+60 + start += 10 + startUint = uint64(start) + s.moveToAndCheck(start) + msg1.BasedBlock = startUint + msg2.BasedBlock = startUint + msg0.BasedBlock = startUint + msg3.BasedBlock = startUint + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconsKey1", kr1) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconsKey2", kr2) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconsKey0", kr0) + s.Require().NoError(err) + + s.moveToAndCheck(start + 2) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3}, "valconsKey3", kr3) + s.Require().NoError(err) + + s.moveToAndCheck(start + 3) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+1), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery2.Prices[0].Price) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price updated from priceRecovery2 to priceRecovery1 + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + // 1{2}, 2{2}, mixed prices + // init_start+70 + start += 10 + startUint = uint64(start) + s.moveToAndCheck(start) + msg1_2 := oracletypes.NewMsgCreatePrice(creator1.String(), 1, []*oracletypes.PriceSource{&priceRecovery1_3}, startUint, 1) + msg2_2 := oracletypes.NewMsgCreatePrice(creator2.String(), 1, []*oracletypes.PriceSource{&priceRecovery1_2}, startUint, 1) + msg3_2 := oracletypes.NewMsgCreatePrice(creator3.String(), 1, []*oracletypes.PriceSource{&priceRecovery3}, startUint, 1) + // msg0_2 := oracletypes.NewMsgCreatePrice(creator0.String(), 1, []*oracletypes.PriceSource{&priceRecovery3}, startUint, 1) + msg0_1.BasedBlock = startUint + + // id:1,2,3 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1_2}, "valconsKey1", kr1) + s.Require().NoError(err) + + // id:3 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3_2}, "valconsKey3", kr3) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + + // id:1,2 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2_2}, "valconsKey2", kr2) + s.Require().NoError(err) + + // id:2 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0_1}, "valconsKey0", kr0) + s.Require().NoError(err) + + s.moveToAndCheck(start + 3) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+1), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + res, err = 
s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price updated from priceRecovery1 to priceRecovery2 + s.Require().Equal(res.Price.Price, priceRecovery2.Prices[0].Price) + + // 1{2}, 2, 3{2} + // init_start+80 + start += 10 + startUint = uint64(start) + s.moveToAndCheck(start) + msg3_2.BasedBlock = startUint + msg1_2.BasedBlock = startUint + msg2_2.BasedBlock = startUint + msg0.BasedBlock = startUint + // id:3 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3_2}, "valconsKey3", kr3) + s.Require().NoError(err) + + // id:1,2,3 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1_2}, "valconsKey1", kr1) + s.Require().NoError(err) + + s.moveToAndCheck(start + 2) + + // id:1,2 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2_2}, "valconsKey2", kr2) + s.Require().NoError(err) + + // id:1 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconsKey0", kr0) + s.Require().NoError(err) + + s.moveToAndCheck(start + 4) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+1), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery2.Prices[0].Price) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+3), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price updated from priceRecovery2 to priceRecovery1 + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + // 1{2}, 2{2}, 3. mixed prices + // init_start+90 + start += 10 + startUint = uint64(start) + s.moveToAndCheck(start) + msg3_2.BasedBlock = startUint + msg1_2.BasedBlock = startUint + msg2_2.BasedBlock = startUint + msg0_2 := oracletypes.NewMsgCreatePrice(creator0.String(), 1, []*oracletypes.PriceSource{&priceRecovery3}, startUint, 1) + // msg0_2.BasedBlock = startUint + // id:3 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3_2}, "valconsKey3", kr3) + s.Require().NoError(err) + + // id:1,2,3 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1_2}, "valconsKey1", kr1) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + + // id:1,2 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2_2}, "valconsKey2", kr2) + s.Require().NoError(err) + + // id:3 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0_2}, "valconsKey0", kr0) + s.Require().NoError(err) + + s.moveToAndCheck(start + 3) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+1), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery1.Prices[0].Price) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price updated from priceRecovery1 to priceRecovery3 + s.Require().Equal(res.Price.Price, priceRecovery3.Prices[0].Price) + + // 2.failed to aggregate + // 2.1 all prices provided are the same(detID, price, decimal), failed for not enough power + // init_start+100 + start += 10 + startUint = uint64(start) + s.moveToAndCheck(start) + msg3.BasedBlock = startUint + msg1.BasedBlock = startUint + msg2_1.BasedBlock = startUint + msg0_1.BasedBlock = startUint + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3}, "valconsKey3", kr3) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconsKey1", 
kr1) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2_1}, "valconsKey2", kr2) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0_1}, "valconsKey0", kr0) + s.Require().NoError(err) + + s.moveToAndCheck(start + 4) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+3), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery3.Prices[0].Price) + + // 2.2 mixed with some different prices(detID, price) + // init_start+110 + start += 10 + startUint = uint64(start) + s.moveToAndCheck(start) + msg3_2.BasedBlock = startUint + msg1_2.BasedBlock = startUint + msg2_2.BasedBlock = startUint + msg0_2.BasedBlock = startUint + // msg0_2.BasedBlock = startUint + // id:3 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3_2}, "valconsKey3", kr3) + s.Require().NoError(err) + + // id:1,2,3 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1_2}, "valconsKey1", kr1) + s.Require().NoError(err) + + s.moveToAndCheck(start + 1) + + // id:1,2 + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2_2}, "valconsKey2", kr2) + s.Require().NoError(err) + + // id:3 + // err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0_2}, "valconsKey0", kr0) + // s.Require().NoError(err) + + s.moveToAndCheck(start + 3) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.Require().NoError(err) + // price not updated yet + s.Require().Equal(res.Price.Price, priceRecovery3.Prices[0].Price) + + // 2.3 failed for forceSeal by paramsUpdate + // TODO: for now all paramsUpdate related forceSeal are not supported (the related fields are not allowed to be updated by msgUpdateParms) + // we comment out this case for now + // start += 10 + // startUint = uint64(start) + // msg0.BasedBlock = startUint + // msg1.BasedBlock = startUint + // msg2.BasedBlock = startUint + // msgUpdateParams := oracletypes.NewMsgUpdateParams("creator", `{"max_nonce":5}`) + // s.moveNAndCheck(start) + // + // err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconsKey0", kr0) + // s.Require().NoError(err) + // + // err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconsKey1", kr1) + // s.Require().NoError(err) + // + // // send updateParams msg to forceSeal current round + // err = s.network.SendTx([]sdk.Msg{msgUpdateParams}, s.network.Validators[0].ClientCtx.FromName, kr0) + // s.Require().NoError(err) + // s.moveToAndCheck(start + 1) + // + // err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconsKey2", kr2) + // s.Require().NoError(err) + // + // s.moveToAndCheck(start + 3) + // + // res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + // s.Require().NoError(err) + // // price failed to update + // s.Require().Equal(res.Price.Price, priceRecovery3.Prices[0].Price) + + // 2.4 failed for forceSeal by validatorSetUpdate: we use an old timestamp in genesisfile to setup the network so that the epoch end will be triggered on each block + start += 10 + startUint = uint64(start) + + msg0.BasedBlock = startUint + msg1.BasedBlock = startUint + msg2.BasedBlock = startUint + // msgUpdateParams := oracletypes.NewMsgUpdateParams(s.network.Validators[0].Address.String(), `{"max_nonce":5}`) + s.moveToAndCheck(start) + + err = 
s.network.SendTxOracleCreateprice([]sdk.Msg{msg0}, "valconsKey0", kr0) + s.Require().NoError(err) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg1}, "valconsKey1", kr1) + s.Require().NoError(err) + + // delegate to change the validator set; we set the genesis time to a historical time so that the validator set update will be triggered every block + clientChainID := uint32(101) + lzNonce := uint64(0) + assetAddr, _ := hexutil.Decode(network.ETHAssetAddress) + stakerAddr := []byte(s.network.Validators[0].Address) + operatorAddr := []byte(s.network.Validators[0].Address.String()) + opAmount := big.NewInt(90000000) + // deposit 32 NSTETH to staker from beaconchain_validator_1 + err = s.network.SendPrecompileTx(network.DELEGATION, "delegate", clientChainID, lzNonce, assetAddr, stakerAddr, operatorAddr, opAmount) + s.Require().NoError(err) + + // power will be updated at the endBlock of start+2, which would force seal this round + s.moveToAndCheck(start + 2) + + err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconsKey2", kr2) + s.Require().NotNil(err) + + s.moveToAndCheck(start + 3) + + res, err = s.network.QueryOracle().LatestPrice(ctxWithHeight(start+2), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1}) + s.NoError(err) + s.Require().Equal(res.Price.Price, priceRecovery3.Prices[0].Price) + + s.moveToAndCheck(start + 20) +} diff --git a/tests/e2e/util.go b/tests/e2e/util.go deleted file mode 100644 index 777e584ad..000000000 --- a/tests/e2e/util.go +++ /dev/null @@ -1,37 +0,0 @@ -package e2e - -import ( - "context" - - sdkmath "cosmossdk.io/math" - "github.com/ExocoreNetwork/exocore/testutil/network" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - sdk "github.com/cosmos/cosmos-sdk/types" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - "github.com/evmos/evmos/v16/crypto/hd" -) - -// func (s *E2ETestSuite) queryNativeCoinBalance(address sdk.AccAddress, n *network.Network) (*banktypes.QueryBalanceResponse, error) { -func QueryNativeCoinBalance(address sdk.AccAddress, n *network.Network) (*banktypes.QueryBalanceResponse, error) { - return n.QueryBank().Balance(context.Background(), &banktypes.QueryBalanceRequest{ - Address: address.String(), - // Denom: s.network.Config.NativeDenom, - Denom: n.Config.NativeDenom, - }) -} - -// func (s *E2ETestSuite) newNativeCoin(amount sdkmath.Int, n *network.Network) sdk.Coin { -func NewNativeCoin(amount sdkmath.Int, n *network.Network) sdk.Coin { - // return sdk.NewCoin(s.network.Config.NativeDenom, amount) - return sdk.NewCoin(n.Config.NativeDenom, amount) -} - -func GenerateAccAddress(kr keyring.Keyring, name string) (sdk.AccAddress, error) { - // generate a new account with ethsecp256k1 - r, _, err := kr.NewMnemonic(name, keyring.English, sdk.GetConfig().GetFullBIP44Path(), "", hd.EthSecp256k1) - if err != nil { - return nil, err - } - addr, _ := r.GetAddress() - return addr, nil -} diff --git a/testutil/keeper/oracle.go b/testutil/keeper/oracle.go index 40ee1d705..8a8fbe321 100644 --- a/testutil/keeper/oracle.go +++ b/testutil/keeper/oracle.go @@ -97,6 +97,7 @@ func OracleKeeper(t testing.TB) (*keeper.Keeper, sdk.Context) { Interval: 10, }) k.SetParams(ctx, p4Test) + k.FeederManager.InitCachesForTest(k, &p4Test, nil) return &k, ctx } diff --git a/testutil/network/genesis_data.go b/testutil/network/genesis_data.go index 258a05007..ed8294f23 100644 --- a/testutil/network/genesis_data.go +++ b/testutil/network/genesis_data.go @@ -40,8 +40,8 @@ var ( }, }, Tokens: []assetstypes.StakingAssetInfo{ - NewTestToken("ETH", "Ethereum native token", 
ETHAssetAddress, TestEVMChainID, 5000), - NewTestToken("NST ETH", "native restaking ETH", NativeAssetAddress, TestEVMChainID, 5000), + NewTestToken("ETH", "Ethereum native token", ETHAssetAddress, TestEVMChainID, 0, 5000), + NewTestToken("NST ETH", "native restaking ETH", NativeAssetAddress, TestEVMChainID, 0, 5000), }, } @@ -62,7 +62,7 @@ func init() { Name: "ETH", ChainID: 1, ContractAddress: "0x", - Decimal: 18, + Decimal: 8, Active: true, // bond assetsIDs of ETH, NSTETH to ETH price AssetID: fmt.Sprintf("%s,%s", ETHAssetID, NativeAssetID), @@ -105,11 +105,11 @@ func init() { }) // set slashing_miss window to 4 DefaultGenStateOracle.Params.Slashing.ReportedRoundsWindow = 4 - // set jailduration of oracle report downtime to 30 seconds for test - DefaultGenStateOracle.Params.Slashing.OracleMissJailDuration = 30 * time.Second + // set jailduration of oracle report downtime to 15 seconds for test + DefaultGenStateOracle.Params.Slashing.OracleMissJailDuration = 15 * time.Second } -func NewTestToken(name, metaInfo, address string, chainID uint64, amount int64) assetstypes.StakingAssetInfo { +func NewTestToken(name, metaInfo, address string, chainID uint64, decimal uint32, amount int64) assetstypes.StakingAssetInfo { if name == "" { panic("token name cannot be empty") } @@ -120,6 +120,7 @@ func NewTestToken(name, metaInfo, address string, chainID uint64, amount int64) AssetBasicInfo: assetstypes.AssetInfo{ Name: name, MetaInfo: metaInfo, + Decimals: decimal, Address: address, LayerZeroChainID: chainID, }, diff --git a/testutil/network/network.go b/testutil/network/network.go index c393f2c50..1a4260c6f 100644 --- a/testutil/network/network.go +++ b/testutil/network/network.go @@ -118,12 +118,13 @@ func DefaultConfig() Config { NumValidators: 4, NativeDenom: "hua", MinGasPrices: "10hua", - AccountTokens: sdk.TokensFromConsensusPower(1000, evmostypes.PowerReduction), - DepositedTokens: sdk.TokensFromConsensusPower(500, evmostypes.PowerReduction), - StakingTokens: sdk.TokensFromConsensusPower(200, evmostypes.PowerReduction), - PruningStrategy: pruningtypes.PruningOptionNothing, - CleanupDir: true, - SigningAlgo: string(hd.EthSecp256k1Type), + AccountTokens: sdk.NewInt(150000000), + DepositedTokens: sdk.NewInt(110000000), + StakingTokens: sdk.NewInt(20000000), + + PruningStrategy: pruningtypes.PruningOptionNothing, + CleanupDir: true, + SigningAlgo: string(hd.EthSecp256k1Type), // KeyringOptions: []keyring.Option{hd.EthSecp256k1Option()}, KeyringOptions: []keyring.Option{exocorecrypto.Ed25519Option()}, PrintMnemonic: false, diff --git a/testutil/network/util.go b/testutil/network/util.go index ff5e86596..d6b56f5e7 100644 --- a/testutil/network/util.go +++ b/testutil/network/util.go @@ -3,6 +3,7 @@ package network import ( "encoding/json" "fmt" + "os" "path/filepath" "time" @@ -36,7 +37,6 @@ import ( cmttime "github.com/cometbft/cometbft/types/time" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/evmos/evmos/v16/server" - evmostypes "github.com/evmos/evmos/v16/types" evmtypes "github.com/evmos/evmos/v16/x/evm/types" feemarkettypes "github.com/evmos/evmos/v16/x/feemarket/types" ) @@ -259,6 +259,10 @@ func initGenFiles(cfg Config, genAccounts []authtypes.GenesisAccount, genBalance // generate empty genesis files for each validator and save gTime := cmttime.Now() + if os.Getenv("TEST_OPTION") == "local" { + gTime = gTime.Add(-300 * time.Minute) + } + // we use a time 100 minutes before now, to trigger epoch change for each block in the early blocks(more than 100 blocks) for i := 0; i < 
cfg.NumValidators; i++ { if genDoc.InitialHeight == 0 { genDoc.InitialHeight = 1 @@ -365,6 +369,7 @@ func NewGenStateOperator(operatorAccAddresses []sdk.AccAddress, consPubKeys []st OperatorAddress: operatorAccAddress.String(), OperatorInfo: operatortypes.OperatorInfo{ EarningsAddr: operatorAccAddress.String(), + ApproveAddr: operatorAccAddress.String(), OperatorMetaInfo: fmt.Sprintf("operator_%d", i), Commission: stakingtypes.Commission{ CommissionRates: stakingtypes.CommissionRates{ @@ -397,7 +402,7 @@ func NewGenStateOperator(operatorAccAddresses []sdk.AccAddress, consPubKeys []st }) // OperatorUSDValues // the price unit of assets is 1 not decimal 18 - stakingValue := sdk.TokensToConsensusPower(stakingAmount, evmostypes.PowerReduction) + stakingValue := stakingAmount.Int64() DefaultGenStateOperator.OperatorUSDValues = append(DefaultGenStateOperator.OperatorUSDValues, operatortypes.OperatorUSDValue{ Key: AVSAddress + "/" + operatorAccAddress.String(), OptedUSDValue: operatortypes.OperatorOptedUSDValue{ @@ -414,7 +419,7 @@ func NewGenStateOperator(operatorAccAddresses []sdk.AccAddress, consPubKeys []st AVSAddr: AVSAddress, Value: operatortypes.DecValueField{ // the price unit of assets is 1 not decimal 18 - Amount: sdkmath.LegacyNewDec(sdk.TokensToConsensusPower(totalStakingAmount, evmostypes.PowerReduction)), + Amount: sdkmath.LegacyNewDec(totalStakingAmount.Int64()), }, }) } @@ -425,7 +430,7 @@ func NewGenStateOperator(operatorAccAddresses []sdk.AccAddress, consPubKeys []st // stakingAmount is the amount each operator have for every single asset defined in assets module, so for a single operator the total stakingAmount they have is stakingAmount*count(assets) // assets genesis state is required as input argument to provide assets information. It should be called with NewGenStateAssets to update default assets genesis state for test func NewGenStateDogfood(consPubKeys []string, stakingAmount sdkmath.Int, genStateAssets assetstypes.GenesisState) (dogfoodtypes.GenesisState, error) { - power := sdk.TokensToConsensusPower(stakingAmount.Mul(sdkmath.NewInt(int64(len(genStateAssets.Tokens)))), evmostypes.PowerReduction) + power := stakingAmount.Mul(sdkmath.NewInt(int64(len(genStateAssets.Tokens)))).Int64() DefaultGenStateDogfood.Params.EpochIdentifier = "minute" DefaultGenStateDogfood.Params.EpochsUntilUnbonded = 5 DefaultGenStateDogfood.Params.MinSelfDelegation = sdkmath.NewInt(100) diff --git a/testutil/utils.go b/testutil/utils.go index 7814089c8..5ff3a9896 100644 --- a/testutil/utils.go +++ b/testutil/utils.go @@ -260,6 +260,7 @@ func (suite *BaseTestSuite) SetupWithGenesisValSet(genAccs []authtypes.GenesisAc OperatorInfo: operatortypes.OperatorInfo{ EarningsAddr: operator1.String(), OperatorMetaInfo: "operator1", + ApproveAddr: operator1.String(), Commission: stakingtypes.NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()), }, }, @@ -268,6 +269,7 @@ func (suite *BaseTestSuite) SetupWithGenesisValSet(genAccs []authtypes.GenesisAc OperatorInfo: operatortypes.OperatorInfo{ EarningsAddr: operator2.String(), OperatorMetaInfo: "operator2", + ApproveAddr: operator2.String(), Commission: stakingtypes.NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()), }, }, diff --git a/utils/utils.go b/utils/utils.go index 8cca82854..a2d5b83ee 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -2,6 +2,7 @@ package utils import ( "bytes" + "encoding/binary" "sort" "strings" @@ -220,3 +221,20 @@ func AppendMany(byteses ...[]byte) (out []byte) { } return out } + +// Uint32ToBigEndian - marshals 
uint32 to a bigendian byte slice so it can be sorted +func Uint32ToBigEndian(i uint32) []byte { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, i) + return b +} + +// BigEndianToUint32 returns an uint32 from big endian encoded bytes. If encoding +// is empty, zero is returned. +func BigEndianToUint32(bz []byte) uint32 { + if len(bz) == 0 { + return 0 + } + + return binary.BigEndian.Uint32(bz) +} diff --git a/x/assets/keeper/bank.go b/x/assets/keeper/bank.go index eff864cde..4940ee38e 100644 --- a/x/assets/keeper/bank.go +++ b/x/assets/keeper/bank.go @@ -19,46 +19,69 @@ type DepositWithdrawParams struct { } // PerformDepositOrWithdraw the assets precompile contract will call this function to update asset state -// when there is a deposit or withdraw. -func (k Keeper) PerformDepositOrWithdraw(ctx sdk.Context, params *DepositWithdrawParams) error { +// when there is a deposit or withdraw. It returns the final deposit amount, post completion of the deposit +// or withdraw operation. +func (k Keeper) PerformDepositOrWithdraw( + ctx sdk.Context, params *DepositWithdrawParams, +) (sdkmath.Int, error) { // check params parameter before executing operation - if params.OpAmount.IsNegative() { - return assetstypes.ErrInvalidAmount.Wrapf( - "negative amount:%s", params.OpAmount, + if !params.OpAmount.IsPositive() { + return sdkmath.ZeroInt(), assetstypes.ErrInvalidAmount.Wrapf( + "non-positive amount:%s", params.OpAmount, ) } + + // check if staking asset exists stakerID, assetID := assetstypes.GetStakerIDAndAssetID(params.ClientChainLzID, params.StakerAddress, params.AssetsAddress) if !k.IsStakingAsset(ctx, assetID) { - return errorsmod.Wrapf(assetstypes.ErrNoClientChainAssetKey, "assetAddr:%s clientChainID:%v", hexutil.Encode(params.AssetsAddress), params.ClientChainLzID) + return sdkmath.ZeroInt(), assetstypes.ErrNoClientChainAssetKey.Wrapf( + "assetAddr:%s clientChainID:%v", + hexutil.Encode(params.AssetsAddress), params.ClientChainLzID, + ) + } + + // even though this is unlikely to be true, guard against it. + if assetID == assetstypes.ExocoreAssetID { + return sdkmath.ZeroInt(), assetstypes.ErrNoClientChainAssetKey.Wrapf( + "cannot deposit exo native assetID:%s", assetID, + ) } + // add the sign to the (previously positive) amount actualOpAmount := params.OpAmount switch params.Action { case assetstypes.DepositLST, assetstypes.DepositNST: case assetstypes.WithdrawLST, assetstypes.WithdrawNST: actualOpAmount = actualOpAmount.Neg() default: - return errorsmod.Wrapf(assetstypes.ErrInvalidOperationType, "the operation type is: %v", params.Action) + return sdkmath.ZeroInt(), assetstypes.ErrInvalidOperationType.Wrapf( + "the operation type is: %v", params.Action, + ) } changeAmount := assetstypes.DeltaStakerSingleAsset{ TotalDepositAmount: actualOpAmount, WithdrawableAmount: actualOpAmount, } - // don't update staker info for exo-native-token - // TODO: do we need additional process for exo-native-token ? 
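// Aside, not part of the patch: a caller-side sketch of the reworked
// PerformDepositOrWithdraw, which now reports the staker's final deposit amount
// instead of returning only an error. The parameter field names and the action
// constant are taken from the hunk above; the package paths, field types, and
// surrounding wiring are assumptions for illustration only.
package example

import (
	sdkmath "cosmossdk.io/math"
	sdk "github.com/cosmos/cosmos-sdk/types"

	assetskeeper "github.com/ExocoreNetwork/exocore/x/assets/keeper"
	assetstypes "github.com/ExocoreNetwork/exocore/x/assets/types"
)

func handleDeposit(
	ctx sdk.Context, k assetskeeper.Keeper,
	clientChainLzID uint64, staker, asset []byte, amount sdkmath.Int,
) error {
	finalAmount, err := k.PerformDepositOrWithdraw(ctx, &assetskeeper.DepositWithdrawParams{
		ClientChainLzID: clientChainLzID,
		StakerAddress:   staker,
		AssetsAddress:   asset,
		Action:          assetstypes.DepositLST,
		OpAmount:        amount,
	})
	if err != nil {
		return err
	}
	// finalAmount is the total deposited balance after the operation, which a
	// caller such as the assets precompile can relay back to the client chain.
	ctx.Logger().Info("deposit handled", "finalDepositAmount", finalAmount.String())
	return nil
}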
- if assetID != assetstypes.ExocoreAssetID { - // update asset state of the specified staker - err := k.UpdateStakerAssetState(ctx, stakerID, assetID, changeAmount) - if err != nil { - return errorsmod.Wrapf(err, "stakerID:%s assetID:%s", stakerID, assetID) - } + // update asset state of the specified staker + info, err := k.UpdateStakerAssetState(ctx, stakerID, assetID, changeAmount) + if err != nil { + return sdkmath.ZeroInt(), errorsmod.Wrapf( + err, "stakerID:%s assetID:%s", stakerID, assetID, + ) + } - // update total amount of the deposited asset - err = k.UpdateStakingAssetTotalAmount(ctx, assetID, actualOpAmount) - if err != nil { - return errorsmod.Wrapf(err, "assetID:%s", assetID) - } + // update total amount of the deposited asset + err = k.UpdateStakingAssetTotalAmount(ctx, assetID, actualOpAmount) + if err != nil { + return sdkmath.ZeroInt(), errorsmod.Wrapf(err, "assetID:%s", assetID) } - return nil + + // TODO: consider emitting EVM event? + // currently such events are emitted by the ExocoreGateway so this may not be + // necessary. however, there is no large downside in emitting equivalent EVM + // events here. + + // return the final deposit amount + return info.TotalDepositAmount, nil } diff --git a/x/assets/keeper/client_chain.go b/x/assets/keeper/client_chain.go index ebd3ff573..d69eb3fae 100644 --- a/x/assets/keeper/client_chain.go +++ b/x/assets/keeper/client_chain.go @@ -1,6 +1,8 @@ package keeper import ( + "fmt" + assetstype "github.com/ExocoreNetwork/exocore/x/assets/types" "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" @@ -9,13 +11,34 @@ import ( // SetClientChainInfo todo: Temporarily use LayerZeroChainID as key. // It provides a function to register the client chains supported by exoCore.It's called by genesis configuration now,however it will be called by the governance in the future -func (k Keeper) SetClientChainInfo(ctx sdk.Context, info *assetstype.ClientChainInfo) (err error) { +func (k Keeper) SetClientChainInfo(ctx sdk.Context, info *assetstype.ClientChainInfo) (bool, error) { store := prefix.NewStore(ctx.KVStore(k.storeKey), assetstype.KeyPrefixClientChainInfo) + key := []byte(hexutil.EncodeUint64(info.LayerZeroChainID)) + + eventType := assetstype.EventTypeNewClientChain + updated := store.Has(key) + if updated { + eventType = assetstype.EventTypeUpdatedClientChain + } bz := k.cdc.MustMarshal(info) + store.Set(key, bz) - store.Set([]byte(hexutil.EncodeUint64(info.LayerZeroChainID)), bz) - return nil + ctx.EventManager().EmitEvent( + sdk.NewEvent( + eventType, + sdk.NewAttribute(assetstype.AttributeKeyName, info.Name), + sdk.NewAttribute(assetstype.AttributeKeyMetaInfo, info.MetaInfo), + sdk.NewAttribute(assetstype.AttributeKeyChainID, fmt.Sprintf("%d", info.ChainId)), + sdk.NewAttribute(assetstype.AttributeKeyExocoreChainIdx, fmt.Sprintf("%d", info.ExocoreChainIndex)), + sdk.NewAttribute(assetstype.AttributeKeyFinalizationBlocks, fmt.Sprintf("%d", info.FinalizationBlocks)), + sdk.NewAttribute(assetstype.AttributeKeyLZID, fmt.Sprintf("%d", info.LayerZeroChainID)), + sdk.NewAttribute(assetstype.AttributeKeySigType, info.SignatureType), + sdk.NewAttribute(assetstype.AttributeKeyAddrLength, fmt.Sprintf("%d", info.AddressLength)), + ), + ) + + return updated, nil } func (k Keeper) ClientChainExists(ctx sdk.Context, index uint64) bool { diff --git a/x/assets/keeper/client_chain_asset.go b/x/assets/keeper/client_chain_asset.go index 06a861cbb..1e1d2fa4b 100644 --- a/x/assets/keeper/client_chain_asset.go +++ 
b/x/assets/keeper/client_chain_asset.go @@ -1,6 +1,8 @@ package keeper import ( + "fmt" + errorsmod "cosmossdk.io/errors" sdkmath "cosmossdk.io/math" assetstype "github.com/ExocoreNetwork/exocore/x/assets/types" @@ -31,6 +33,16 @@ func (k Keeper) UpdateStakingAssetTotalAmount(ctx sdk.Context, assetID string, c } bz := k.cdc.MustMarshal(&ret) store.Set(key, bz) + + // emit event for indexers + ctx.EventManager().EmitEvent( + sdk.NewEvent( + assetstype.EventTypeUpdatedStakingTotalAmount, + sdk.NewAttribute(assetstype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(assetstype.AttributeKeyTotalAmount, ret.StakingTotalAmount.String()), + ), + ) + return nil } @@ -55,6 +67,20 @@ func (k Keeper) SetStakingAssetInfo(ctx sdk.Context, info *assetstype.StakingAss } bz := k.cdc.MustMarshal(info) store.Set([]byte(assetID), bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + assetstype.EventTypeNewToken, + sdk.NewAttribute(assetstype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(assetstype.AttributeKeyName, info.AssetBasicInfo.Name), + sdk.NewAttribute(assetstype.AttributeKeySymbol, info.AssetBasicInfo.Symbol), + sdk.NewAttribute(assetstype.AttributeKeyAddress, info.AssetBasicInfo.Address), + sdk.NewAttribute(assetstype.AttributeKeyDecimals, fmt.Sprintf("%d", info.AssetBasicInfo.Decimals)), + sdk.NewAttribute(assetstype.AttributeKeyLZID, fmt.Sprintf("%d", info.AssetBasicInfo.LayerZeroChainID)), + sdk.NewAttribute(assetstype.AttributeKeyMetaInfo, info.AssetBasicInfo.MetaInfo), + sdk.NewAttribute(assetstype.AttributeKeyExocoreChainIdx, fmt.Sprintf("%d", info.AssetBasicInfo.ExocoreChainIndex)), + sdk.NewAttribute(assetstype.AttributeKeyTotalAmount, info.StakingTotalAmount.String()), + ), + ) return nil } @@ -75,6 +101,13 @@ func (k Keeper) UpdateStakingAssetMetaInfo(ctx sdk.Context, assetID string, meta info.AssetBasicInfo.MetaInfo = metainfo bz := k.cdc.MustMarshal(info) store.Set([]byte(assetID), bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + assetstype.EventTypeUpdatedToken, + sdk.NewAttribute(assetstype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(assetstype.AttributeKeyMetaInfo, metainfo), + ), + ) return nil } diff --git a/x/assets/keeper/genesis.go b/x/assets/keeper/genesis.go index 2a5067150..6686a4259 100644 --- a/x/assets/keeper/genesis.go +++ b/x/assets/keeper/genesis.go @@ -17,8 +17,11 @@ func (k Keeper) InitGenesis(ctx sdk.Context, data *types.GenesisState) { // client_chain.go for i := range data.ClientChains { info := data.ClientChains[i] - if err := k.SetClientChainInfo(ctx, &info); err != nil { + if updated, err := k.SetClientChainInfo(ctx, &info); err != nil { panic(errorsmod.Wrap(err, "failed to set client chain info")) + } else if updated { + // should not happen if validate-genesis has been called. + panic(errorsmod.Wrapf(types.ErrInvalidGenesisData, "duplicate client chain found: %s", info.Name)) } } // client_chain_asset.go @@ -40,7 +43,8 @@ func (k Keeper) InitGenesis(ctx sdk.Context, data *types.GenesisState) { info := depositsByStaker.Info infoAsChange := types.DeltaStakerSingleAsset(info) // set the deposited and free values for the staker - if err := k.UpdateStakerAssetState( + // this will not emit an event. 
+ if _, err := k.UpdateStakerAssetState( ctx, stakerID, assetID, infoAsChange, ); err != nil { panic(errorsmod.Wrap(err, "failed to set deposit info")) diff --git a/x/assets/keeper/keeper.go b/x/assets/keeper/keeper.go index b4c82ff39..e379f8d4e 100644 --- a/x/assets/keeper/keeper.go +++ b/x/assets/keeper/keeper.go @@ -3,6 +3,7 @@ package keeper import ( "fmt" + sdkmath "cosmossdk.io/math" assetstype "github.com/ExocoreNetwork/exocore/x/assets/types" "github.com/cosmos/cosmos-sdk/codec" storetypes "github.com/cosmos/cosmos-sdk/store/types" @@ -59,7 +60,7 @@ func (k Keeper) SetOperatorAssetOptedInMiddleWare(sdk.Address, map[string]sdk.Ad // IAssets interface will be implemented by assets keeper type IAssets interface { - SetClientChainInfo(ctx sdk.Context, info *assetstype.ClientChainInfo) (err error) + SetClientChainInfo(ctx sdk.Context, info *assetstype.ClientChainInfo) (bool, error) GetClientChainInfoByIndex(ctx sdk.Context, index uint64) (info *assetstype.ClientChainInfo, err error) GetAllClientChainInfo(ctx sdk.Context) (infos []assetstype.ClientChainInfo, err error) @@ -69,10 +70,12 @@ type IAssets interface { GetStakerAssetInfos(ctx sdk.Context, stakerID string) (assetsInfo []assetstype.DepositByAsset, err error) GetStakerSpecifiedAssetInfo(ctx sdk.Context, stakerID string, assetID string) (info *assetstype.StakerAssetInfo, err error) - UpdateStakerAssetState(ctx sdk.Context, stakerID string, assetID string, changeAmount assetstype.DeltaStakerSingleAsset) (err error) + UpdateStakerAssetState( + ctx sdk.Context, stakerID string, assetID string, changeAmount assetstype.DeltaStakerSingleAsset, + ) (*assetstype.StakerAssetInfo, error) GetOperatorAssetInfos(ctx sdk.Context, operatorAddr sdk.Address, assetsFilter map[string]interface{}) (assetsInfo []assetstype.AssetByID, err error) GetOperatorSpecifiedAssetInfo(ctx sdk.Context, operatorAddr sdk.Address, assetID string) (info *assetstype.OperatorAssetInfo, err error) UpdateOperatorAssetState(ctx sdk.Context, operatorAddr sdk.Address, assetID string, changeAmount assetstype.DeltaOperatorSingleAsset) (err error) - PerformDepositOrWithdraw(ctx sdk.Context, params *DepositWithdrawParams) error + PerformDepositOrWithdraw(ctx sdk.Context, params *DepositWithdrawParams) (finalDepositAmount sdkmath.Int, err error) } diff --git a/x/assets/keeper/operator_asset.go b/x/assets/keeper/operator_asset.go index 0f6a725c2..a1f4242bc 100644 --- a/x/assets/keeper/operator_asset.go +++ b/x/assets/keeper/operator_asset.go @@ -116,6 +116,19 @@ func (k Keeper) UpdateOperatorAssetState(ctx sdk.Context, operatorAddr sdk.Addre // store the updated state bz := k.cdc.MustMarshal(&assetState) store.Set(key, bz) + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + assetstype.EventTypeUpdatedOperatorAsset, + sdk.NewAttribute(assetstype.AttributeKeyOperatorAddress, operatorAddr.String()), + sdk.NewAttribute(assetstype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(assetstype.AttributeKeyTotalAmount, assetState.TotalAmount.String()), + sdk.NewAttribute(assetstype.AttributeKeyPendingUndelegationAmount, assetState.PendingUndelegationAmount.String()), + sdk.NewAttribute(assetstype.AttributeKeyTotalShare, assetState.TotalShare.String()), + sdk.NewAttribute(assetstype.AttributeKeyOperatorShare, assetState.OperatorShare.String()), + ), + ) + return nil } @@ -133,12 +146,13 @@ func (k Keeper) IterateAssetsForOperator(ctx sdk.Context, isUpdate bool, operato if err != nil { return err } + assetID := keys[1] if assetsFilter != nil { - if _, ok := assetsFilter[keys[1]]; !ok { + if 
_, ok := assetsFilter[assetID]; !ok { continue } } - err = opFunc(keys[1], &amounts) + err = opFunc(assetID, &amounts) if err != nil { return err } @@ -146,6 +160,17 @@ func (k Keeper) IterateAssetsForOperator(ctx sdk.Context, isUpdate bool, operato // store the updated state bz := k.cdc.MustMarshal(&amounts) store.Set(iterator.Key(), bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + assetstype.EventTypeUpdatedOperatorAsset, + sdk.NewAttribute(assetstype.AttributeKeyOperatorAddress, operator), + sdk.NewAttribute(assetstype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(assetstype.AttributeKeyTotalAmount, amounts.TotalAmount.String()), + sdk.NewAttribute(assetstype.AttributeKeyPendingUndelegationAmount, amounts.PendingUndelegationAmount.String()), + sdk.NewAttribute(assetstype.AttributeKeyTotalShare, amounts.TotalShare.String()), + sdk.NewAttribute(assetstype.AttributeKeyOperatorShare, amounts.OperatorShare.String()), + ), + ) } } return nil diff --git a/x/assets/keeper/staker_asset.go b/x/assets/keeper/staker_asset.go index c68ec013f..68339981f 100644 --- a/x/assets/keeper/staker_asset.go +++ b/x/assets/keeper/staker_asset.go @@ -138,7 +138,9 @@ func (k Keeper) GetStakerSpecifiedAssetInfo(ctx sdk.Context, stakerID string, as // UpdateStakerAssetState is used to update the staker asset state // The input `changeAmount` represents the values that you want to add or decrease,using positive or negative values for increasing and decreasing,respectively. The function will calculate and update new state after a successful check. // The function will be called when there is deposit or withdraw related to the specified staker. -func (k Keeper) UpdateStakerAssetState(ctx sdk.Context, stakerID string, assetID string, changeAmount assetstype.DeltaStakerSingleAsset) (err error) { +func (k Keeper) UpdateStakerAssetState( + ctx sdk.Context, stakerID string, assetID string, changeAmount assetstype.DeltaStakerSingleAsset, +) (info *assetstype.StakerAssetInfo, err error) { // get the latest state,use the default initial state if the state hasn't been stored store := prefix.NewStore(ctx.KVStore(k.storeKey), assetstype.KeyPrefixReStakerAssetInfos) key := assetstype.GetJoinedStoreKey(stakerID, assetID) @@ -154,22 +156,50 @@ func (k Keeper) UpdateStakerAssetState(ctx sdk.Context, stakerID string, assetID // update all states of the specified restaker asset err = assetstype.UpdateAssetValue(&assetState.TotalDepositAmount, &changeAmount.TotalDepositAmount) if err != nil { - return errorsmod.Wrap(err, "UpdateStakerAssetState TotalDepositAmount error") + return nil, errorsmod.Wrap(err, "UpdateStakerAssetState TotalDepositAmount error") } err = assetstype.UpdateAssetValue(&assetState.WithdrawableAmount, &changeAmount.WithdrawableAmount) if err != nil { - return errorsmod.Wrap(err, "UpdateStakerAssetState CanWithdrawAmountOrWantChangeValue error") + return nil, errorsmod.Wrap(err, "UpdateStakerAssetState CanWithdrawAmountOrWantChangeValue error") } err = assetstype.UpdateAssetValue(&assetState.PendingUndelegationAmount, &changeAmount.PendingUndelegationAmount) if err != nil { - return errorsmod.Wrap(err, "UpdateStakerAssetState WaitUndelegationAmountOrWantChangeValue error") + return nil, errorsmod.Wrap(err, "UpdateStakerAssetState WaitUndelegationAmountOrWantChangeValue error") } // store the updated state bz := k.cdc.MustMarshal(&assetState) store.Set(key, bz) - return nil + // emit event with new amount. 
+ // the indexer can pick this up and update the staker's asset state + // without needing to know the prior state. it can also use the + // event type to index a deposit or withdrawal history. + // this event is only emitted here; callers of this function with + // other side effects may emit events dedicated to those side effects + // in addition to this event. + ctx.EventManager().EmitEvent( + sdk.NewEvent( + assetstype.EventTypeUpdatedStakerAsset, + sdk.NewAttribute( + assetstype.AttributeKeyStakerID, stakerID, + ), + sdk.NewAttribute( + assetstype.AttributeKeyAssetID, assetID, + ), + sdk.NewAttribute( + assetstype.AttributeKeyDepositAmount, assetState.TotalDepositAmount.String(), + ), + sdk.NewAttribute( + assetstype.AttributeKeyWithdrawableAmount, assetState.WithdrawableAmount.String(), + ), + sdk.NewAttribute( + assetstype.AttributeKeyPendingUndelegationAmount, assetState.PendingUndelegationAmount.String(), + ), + ), + ) + + return &assetState, nil } func (k Keeper) GetStakerBalanceByAsset(ctx sdk.Context, stakerID string, assetID string) (balance assetstype.StakerBalance, err error) { diff --git a/x/assets/keeper/staker_asset_test.go b/x/assets/keeper/staker_asset_test.go index 7e8530075..78815a6fc 100644 --- a/x/assets/keeper/staker_asset_test.go +++ b/x/assets/keeper/staker_asset_test.go @@ -17,7 +17,7 @@ func (suite *StakingAssetsTestSuite) TestUpdateStakerAssetsState() { } // test the initial storage of statker assets state - err := suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) + _, err := suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) suite.Require().NoError(err) // test that the retrieved value is correct @@ -29,7 +29,7 @@ func (suite *StakingAssetsTestSuite) TestUpdateStakerAssetsState() { // test valid increase of staker asset state ethUniInitialChangeValue.TotalDepositAmount = math.NewInt(500) ethUniInitialChangeValue.WithdrawableAmount = math.NewInt(500) - err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) + _, err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) suite.Require().NoError(err) getInfo, err = suite.App.AssetsKeeper.GetStakerSpecifiedAssetInfo(suite.Ctx, stakerID, ethUniAssetID) @@ -40,7 +40,7 @@ func (suite *StakingAssetsTestSuite) TestUpdateStakerAssetsState() { // test valid decrease of staker asset state ethUniInitialChangeValue.TotalDepositAmount = math.NewInt(-500) ethUniInitialChangeValue.WithdrawableAmount = math.NewInt(-500) - err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) + _, err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) suite.Require().NoError(err) getInfo, err = suite.App.AssetsKeeper.GetStakerSpecifiedAssetInfo(suite.Ctx, stakerID, ethUniAssetID) suite.Require().NoError(err) @@ -50,7 +50,7 @@ func (suite *StakingAssetsTestSuite) TestUpdateStakerAssetsState() { // test the decreased amount is bigger than original state ethUniInitialChangeValue.TotalDepositAmount = math.NewInt(-2000) ethUniInitialChangeValue.WithdrawableAmount = math.NewInt(-500) - err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) + _, err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, 
ethUniInitialChangeValue) suite.Require().Error(err, assetstype.ErrSubAmountIsMoreThanOrigin) getInfo, err = suite.App.AssetsKeeper.GetStakerSpecifiedAssetInfo(suite.Ctx, stakerID, ethUniAssetID) suite.Require().NoError(err) @@ -59,7 +59,7 @@ func (suite *StakingAssetsTestSuite) TestUpdateStakerAssetsState() { ethUniInitialChangeValue.TotalDepositAmount = math.NewInt(-500) ethUniInitialChangeValue.WithdrawableAmount = math.NewInt(-2000) - err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) + _, err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) suite.Require().Error(err, assetstype.ErrSubAmountIsMoreThanOrigin) getInfo, err = suite.App.AssetsKeeper.GetStakerSpecifiedAssetInfo(suite.Ctx, stakerID, ethUniAssetID) suite.Require().NoError(err) @@ -72,7 +72,7 @@ func (suite *StakingAssetsTestSuite) TestUpdateStakerAssetsState() { TotalDepositAmount: math.NewInt(2000), WithdrawableAmount: math.NewInt(2000), } - err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUsdtAssetID, ethUsdtInitialChangeValue) + _, err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUsdtAssetID, ethUsdtInitialChangeValue) suite.Require().NoError(err) getInfo, err = suite.App.AssetsKeeper.GetStakerSpecifiedAssetInfo(suite.Ctx, stakerID, ethUsdtAssetID) suite.Require().NoError(err) @@ -104,9 +104,9 @@ func (suite *StakingAssetsTestSuite) TestGetStakerAssetInfos() { Info: assetstype.StakerAssetInfo(ethUsdtInitialChangeValue), }, } - err := suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) + _, err := suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUniAssetID, ethUniInitialChangeValue) suite.Require().NoError(err) - err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUsdtAssetID, ethUsdtInitialChangeValue) + _, err = suite.App.AssetsKeeper.UpdateStakerAssetState(suite.Ctx, stakerID, ethUsdtAssetID, ethUsdtInitialChangeValue) suite.Require().NoError(err) // test get all assets state of staker diff --git a/x/assets/types/asset_info.go b/x/assets/types/asset_info.go new file mode 100644 index 000000000..c3f370e0a --- /dev/null +++ b/x/assets/types/asset_info.go @@ -0,0 +1,14 @@ +package types + +// AssetID obtains the asset ID from the staking asset info +func (info StakingAssetInfo) AssetID() string { + return info.AssetBasicInfo.AssetID() +} + +// AssetID obtains the asset ID from the asset info +func (info AssetInfo) AssetID() string { + _, assetID := GetStakerIDAndAssetIDFromStr( + info.LayerZeroChainID, "", info.Address, + ) + return assetID +} diff --git a/x/assets/types/events.go b/x/assets/types/events.go new file mode 100644 index 000000000..a088fb68c --- /dev/null +++ b/x/assets/types/events.go @@ -0,0 +1,44 @@ +package types + +// x/assets events +const ( + // staker asset state updated + EventTypeUpdatedStakerAsset = "staker_asset_updated" + AttributeKeyStakerID = "staker_id" + AttributeKeyAssetID = "asset_id" + AttributeKeyDepositAmount = "deposit_amount" + AttributeKeyWithdrawableAmount = "withdrawable_amount" + AttributeKeyPendingUndelegationAmount = "pending_undelegation_amount" + + // client chain addition or update + EventTypeNewClientChain = "client_chain_added" + EventTypeUpdatedClientChain = "client_chain_updated" + AttributeKeyName = "name" + AttributeKeyMetaInfo = "meta_info" + AttributeKeyChainID = "chain_id" + 
AttributeKeyExocoreChainIdx = "exocore_chain_index" + AttributeKeyFinalizationBlocks = "finalization_blocks" + AttributeKeyLZID = "layer_zero_chain_id" + AttributeKeySigType = "signature_type" + AttributeKeyAddrLength = "address_length" + + // token addition + EventTypeNewToken = "token_added" + AttributeKeySymbol = "symbol" + AttributeKeyAddress = "address" + AttributeKeyDecimals = "decimals" + + // token update + EventTypeUpdatedToken = "token_updated" + + // operator asset state updated + EventTypeUpdatedOperatorAsset = "operator_asset_updated" + AttributeKeyOperatorAddress = "operator_address" + AttributeKeyTotalAmount = "total_amount" + AttributeKeyTotalShare = "total_share" + AttributeKeyOperatorShare = "operator_share" + + // token deposit amount updated; useful for tracking total deposited of an asset. + // note that this amount includes lifetime slashed quantity of that token. + EventTypeUpdatedStakingTotalAmount = "staking_total_amount_updated" +) diff --git a/x/assets/types/general.go b/x/assets/types/general.go index 7e98a2651..e6dfe846a 100644 --- a/x/assets/types/general.go +++ b/x/assets/types/general.go @@ -57,6 +57,30 @@ type GeneralClientChainAddr [32]byte type CrossChainOpType uint8 +// String returns the string representation of the CrossChainOpType +func (c CrossChainOpType) String() string { + switch c { + case DepositLST: + return "DepositLST" + case WithdrawLST: + return "WithdrawLST" + case DepositNST: + return "DepositNST" + case WithdrawNST: + return "WithdrawNST" + case WithdrawReward: + return "WithdrawReward" + case DelegateTo: + return "DelegateTo" + case UndelegateFrom: + return "UndelegateFrom" + case Slash: + return "Slash" + default: + return "Unknown" + } +} + type WithdrawerAddress [32]byte // DeltaStakerSingleAsset This is a struct to describe the desired change that matches with diff --git a/x/assets/types/genesis.go b/x/assets/types/genesis.go index 902ee81eb..4759c1145 100644 --- a/x/assets/types/genesis.go +++ b/x/assets/types/genesis.go @@ -86,9 +86,12 @@ func (gs GenesisState) ValidateTokens(lzIDs map[uint64]struct{}) (map[string]mat ) } - // ensure there are no deposits for this asset already (since they are handled in the - // genesis exec). while it is possible to remove this field entirely (and assume 0), - // i did not do so in order to make the genesis state more explicit. + // the initial deposit quantity should be non-negative. + // ideally, we should verify that this quantity sums + // each staker's deposit, but that is not possible solely inside + // this module. + // deposit = free + pending undelegation + delegated, of which + // the last is not stored in this module. 
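// Aside, not part of the patch: the identity mentioned in the comment above,
// spelled out for one (staker, asset) pair. The delegated portion lives in
// x/delegation, so only a cross-module invariant or a test could verify it;
// the helper below is a hypothetical sketch that reuses the StakerAssetInfo
// fields updated elsewhere in this diff.
//
//	// deposit = free (withdrawable) + pending undelegation + delegated
//	func stakerDepositConsistent(info assetstypes.StakerAssetInfo, delegated sdkmath.Int) bool {
//		return info.TotalDepositAmount.Equal(
//			info.WithdrawableAmount.Add(info.PendingUndelegationAmount).Add(delegated),
//		)
//	}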
if info.StakingTotalAmount.IsNil() || info.StakingTotalAmount.IsNegative() { return errorsmod.Wrapf( diff --git a/x/avs/client/cli/tx.go b/x/avs/client/cli/tx.go index b73524a26..7623857b7 100644 --- a/x/avs/client/cli/tx.go +++ b/x/avs/client/cli/tx.go @@ -116,7 +116,7 @@ func newBuildMsg( taskContractAddress, _ := fs.GetString(FlagTaskContractAddress) taskID, _ := fs.GetUint64(FlagTaskID) - phase, _ := fs.GetUint32(FlagPhase) + phase, _ := fs.GetInt32(FlagPhase) if err := types.ValidatePhase(types.Phase(phase)); err != nil { return nil, err } diff --git a/x/avs/keeper/avs.go b/x/avs/keeper/avs.go index d7bedc00d..741d73639 100644 --- a/x/avs/keeper/avs.go +++ b/x/avs/keeper/avs.go @@ -65,6 +65,8 @@ func (k *Keeper) GetAVSMinimumSelfDelegation(ctx sdk.Context, avsAddr string) (s return sdkmath.LegacyNewDec(int64(avsInfo.Info.MinSelfDelegation)), nil } +// GetAVSUnbondingDuration returns the unbonding number of epochs for an AVS. The name is a misnomer, +// since it is not the duration but the number of epochs. func (k *Keeper) GetAVSUnbondingDuration(ctx sdk.Context, avsAddr string) (uint64, error) { avsInfo, err := k.GetAVSInfo(ctx, avsAddr) if err != nil { @@ -143,28 +145,28 @@ func (k *Keeper) GetTaskStatisticalEpochEndAVSs(ctx sdk.Context, epochIdentifier // GetEpochEndAVSs, GetAVSSupportedAssets, and GetAVSMinimumSelfDelegation. func (k Keeper) RegisterAVSWithChainID( oCtx sdk.Context, params *types.AVSRegisterOrDeregisterParams, -) (avsAddr common.Address, err error) { +) (exists bool, avsAddr common.Address, err error) { // guard against errors ctx, writeFunc := oCtx.CacheContext() // remove the version number and validate params.ChainID = types.ChainIDWithoutRevision(params.ChainID) if len(params.ChainID) == 0 { - return common.Address{}, errorsmod.Wrap(types.ErrNotNull, "RegisterAVSWithChainID: chainID is null") + return false, common.Address{}, errorsmod.Wrap(types.ErrNotNull, "RegisterAVSWithChainID: chainID is null") } avsAddrStr := types.GenerateAVSAddress(params.ChainID) avsAddr = common.HexToAddress(avsAddrStr) // check that the AVS is registered if isAvs, _ := k.IsAVS(ctx, avsAddrStr); isAvs { - return avsAddr, nil + // negligible probability that an independent AVS without this chainID exists + return true, avsAddr, nil } defer func() { - if err == nil { + if err == nil && !exists { // store the reverse lookup from AVSAddress to ChainID // (the forward can be generated on the fly by hashing). k.SetAVSAddrToChainID(ctx, avsAddr, params.ChainID) // write the cache writeFunc() - // TODO: do events need to be handled separately? currently no events emitted so not urgent. } }() // Mark the account as occupied by a contract, so that any transactions that originate @@ -178,22 +180,30 @@ func (k Keeper) RegisterAVSWithChainID( Nonce: k.evmKeeper.GetNewContractNonce(ctx), }, ); err != nil { - return common.Address{}, err + return false, common.Address{}, err } // SetAVSInfo expects HexAddress for the AvsAddress params.AvsAddress = avsAddr params.Action = types.RegisterAction if err := k.UpdateAVSInfo(ctx, params); err != nil { - return common.Address{}, err + return false, common.Address{}, err } - return avsAddr, nil + return false, avsAddr, nil } // SetAVSAddressToChainID stores a lookup from the generated AVS address to the chainID. 
func (k Keeper) SetAVSAddrToChainID(ctx sdk.Context, avsAddr common.Address, chainID string) { store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefixAVSAddressToChainID) store.Set(avsAddr[:], []byte(chainID)) + // emit an event + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeChainAvsCreated, + sdk.NewAttribute(types.AttributeKeyChainID, chainID), + sdk.NewAttribute(types.AttributeKeyAvsAddress, avsAddr.String()), + ), + ) } // GetChainIDByAVSAddr returns the chainID for a given AVS address. It is a stateful diff --git a/x/avs/keeper/avs_test.go b/x/avs/keeper/avs_test.go index ceb7db869..d62e93b89 100644 --- a/x/avs/keeper/avs_test.go +++ b/x/avs/keeper/avs_test.go @@ -167,6 +167,7 @@ func (suite *AVSTestSuite) TestUpdateAVSInfoWithOperator_Register() { FromAddress: operatorAddress.String(), Info: &operatorTypes.OperatorInfo{ EarningsAddr: operatorAddress.String(), + ApproveAddr: operatorAddress.String(), }, } _, err = suite.OperatorMsgServer.RegisterOperator(sdk.WrapSDKContext(suite.Ctx), registerReq) diff --git a/x/avs/keeper/keeper.go b/x/avs/keeper/keeper.go index e582b5648..a712fbeb1 100644 --- a/x/avs/keeper/keeper.go +++ b/x/avs/keeper/keeper.go @@ -133,7 +133,19 @@ func (k Keeper) UpdateAVSInfo(ctx sdk.Context, params *types.AVSRegisterOrDeregi WhitelistAddresses: params.WhitelistAddresses, } - return k.SetAVSInfo(ctx, avs) + if err := k.SetAVSInfo(ctx, avs); err != nil { + return err + } + // emit the event + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeAvsCreated, + sdk.NewAttribute(types.AttributeKeyAvsAddress, avs.AvsAddress), + ), + ) + + return nil + case types.DeRegisterAction: if avsInfo == nil { return errorsmod.Wrap(types.ErrUnregisterNonExistent, fmt.Sprintf("the avsaddress is :%s", params.AvsAddress)) @@ -432,6 +444,7 @@ func (k Keeper) RaiseAndResolveChallenge(ctx sdk.Context, params *types.Challeng return errorsmod.Wrap(types.ErrEpochNotFound, fmt.Sprintf("epoch info not found %s", avsInfo.EpochIdentifier)) } + // #nosec G115 if epoch.CurrentEpoch <= int64(taskInfo.StartingEpoch)+int64(taskInfo.TaskResponsePeriod)+int64(taskInfo.TaskStatisticalPeriod) { return errorsmod.Wrap( types.ErrSubmitTooSoonError, @@ -442,6 +455,7 @@ func (k Keeper) RaiseAndResolveChallenge(ctx sdk.Context, params *types.Challeng err = k.SetTaskChallengedInfo(ctx, params.TaskID, params.CallerAddress.String(), params.TaskContractAddress) if err != nil { return err + } taskInfo.ActualThreshold = strconv.Itoa(int(params.ActualThreshold)) @@ -577,6 +591,7 @@ func (k Keeper) SubmitTaskResult(ctx sdk.Context, addr string, info *types.TaskR fmt.Sprintf("SetTaskResultInfo:the TaskResponse period has not started , CurrentEpoch:%d", epoch.CurrentEpoch), ) } + // #nosec G115 if epoch.CurrentEpoch > int64(task.StartingEpoch)+int64(task.TaskResponsePeriod)+int64(task.TaskStatisticalPeriod) { return errorsmod.Wrap( types.ErrSubmitTooLateError, diff --git a/x/avs/keeper/multi_operator_submit_task_test.go b/x/avs/keeper/multi_operator_submit_task_test.go index dfa04c217..4d7cd36ca 100644 --- a/x/avs/keeper/multi_operator_submit_task_test.go +++ b/x/avs/keeper/multi_operator_submit_task_test.go @@ -28,6 +28,7 @@ func (suite *AVSTestSuite) prepareOperators() { FromAddress: opAccAddr.String(), Info: &operatorTypes.OperatorInfo{ EarningsAddr: opAccAddr.String(), + ApproveAddr: opAccAddr.String(), }, } _, err = suite.OperatorMsgServer.RegisterOperator(suite.Ctx, registerReq) @@ -52,7 +53,7 @@ func (suite *AVSTestSuite) prepareMulDeposit(assetAddr common.Address, amount 
sd OpAmount: suite.depositAmount, AssetsAddress: assetAddr[:], } - err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParam) + _, err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParam) suite.NoError(err) } diff --git a/x/avs/keeper/submit_task_test.go b/x/avs/keeper/submit_task_test.go index 180371b6e..126c374ec 100644 --- a/x/avs/keeper/submit_task_test.go +++ b/x/avs/keeper/submit_task_test.go @@ -29,6 +29,7 @@ func (suite *AVSTestSuite) prepareOperator() { FromAddress: suite.operatorAddr.String(), Info: &operatorTypes.OperatorInfo{ EarningsAddr: suite.operatorAddr.String(), + ApproveAddr: suite.operatorAddr.String(), }, } _, err = s.OperatorMsgServer.RegisterOperator(s.Ctx, registerReq) @@ -52,7 +53,7 @@ func (suite *AVSTestSuite) prepareDeposit(assetAddr common.Address, amount sdkma OpAmount: suite.depositAmount, AssetsAddress: assetAddr[:], } - err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParam) + _, err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParam) suite.NoError(err) } diff --git a/x/avs/types/events.go b/x/avs/types/events.go new file mode 100644 index 000000000..779e4a214 --- /dev/null +++ b/x/avs/types/events.go @@ -0,0 +1,12 @@ +package types + +// x/avs events +const ( + // AVS creation. TODO: capture more information in the event. + EventTypeAvsCreated = "avs_created" + AttributeKeyAvsAddress = "avs_address" + + // avs with chain-id + EventTypeChainAvsCreated = "chain_avs_created" + AttributeKeyChainID = "chain_id" +) diff --git a/x/delegation/keeper/abci.go b/x/delegation/keeper/abci.go index 0e3667cae..90bbbdd41 100644 --- a/x/delegation/keeper/abci.go +++ b/x/delegation/keeper/abci.go @@ -83,7 +83,7 @@ func (k *Keeper) EndBlock( continue } } else { - err = k.assetsKeeper.UpdateStakerAssetState(cc, record.StakerId, record.AssetId, assetstypes.DeltaStakerSingleAsset{ + _, err = k.assetsKeeper.UpdateStakerAssetState(cc, record.StakerId, record.AssetId, assetstypes.DeltaStakerSingleAsset{ WithdrawableAmount: record.ActualCompletedAmount, PendingUndelegationAmount: recordAmountNeg, }) diff --git a/x/delegation/keeper/delegation.go b/x/delegation/keeper/delegation.go index ac0081420..ad92db718 100644 --- a/x/delegation/keeper/delegation.go +++ b/x/delegation/keeper/delegation.go @@ -8,6 +8,7 @@ import ( assetstype "github.com/ExocoreNetwork/exocore/x/assets/types" delegationtype "github.com/ExocoreNetwork/exocore/x/delegation/types" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/ethereum/go-ethereum/common/hexutil" ) // DelegateTo : It doesn't need to check the active status of the operator in middlewares when @@ -37,7 +38,6 @@ func (k *Keeper) delegateTo( if notGenesis && k.slashKeeper.IsOperatorFrozen(ctx, params.OperatorAddress) { return delegationtype.ErrOperatorIsFrozen } - stakerID, assetID := assetstype.GetStakerIDAndAssetID(params.ClientChainID, params.StakerAddress, params.AssetsAddress) if assetID != assetstype.ExocoreAssetID { // check if the staker asset has been deposited and the canWithdraw amount is bigger than the delegation amount @@ -51,7 +51,7 @@ func (k *Keeper) delegateTo( } // update staker asset state - err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, assetstype.DeltaStakerSingleAsset{ + _, err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, assetstype.DeltaStakerSingleAsset{ WithdrawableAmount: params.OpAmount.Neg(), }) if err != nil { @@ -77,6 +77,17 @@ func (k *Keeper) delegateTo( return err } } + // this 
emitted event is not the total amount; it is the additional amount. + // indexers must add it to the last known amount to get the total amount. + // non-native case handled within UpdateStakerAssetState + ctx.EventManager().EmitEvent( + sdk.NewEvent( + delegationtype.EventTypeExoAssetDelegation, + sdk.NewAttribute(delegationtype.AttributeKeyStakerID, sdk.AccAddress(params.StakerAddress).String()), + sdk.NewAttribute(delegationtype.AttributeKeyOperator, params.OperatorAddress.String()), + sdk.NewAttribute(delegationtype.AttributeKeyAmount, params.OpAmount.String()), + ), + ) } // calculate the share from the delegation amount share, err := k.CalculateShare(ctx, params.OperatorAddress, assetID, params.OpAmount) @@ -181,8 +192,30 @@ func (k *Keeper) UndelegateFrom(ctx sdk.Context, params *delegationtype.Delegati if err != nil { return err } + + recordKey := r.GetKey() + // emit an event to track the undelegation record identifiers. + // for the ExocoreAssetID undelegation, this event is used to track asset state as well. + // for other undelegations, it is instead tracked from the staker asset state. + ctx.EventManager().EmitEvent( + sdk.NewEvent( + delegationtype.EventTypeUndelegationStarted, + sdk.NewAttribute(delegationtype.AttributeKeyStakerID, r.StakerId), + sdk.NewAttribute(delegationtype.AttributeKeyAssetID, r.AssetId), + sdk.NewAttribute(delegationtype.AttributeKeyOperator, r.OperatorAddr), + sdk.NewAttribute(delegationtype.AttributeKeyRecordID, hexutil.Encode(recordKey)), + // the amount and ActualCompletedAmount are the same unless slashed, which does not happen within this function. + sdk.NewAttribute(delegationtype.AttributeKeyAmount, r.Amount.String()), + sdk.NewAttribute(delegationtype.AttributeKeyCompletedEpochID, r.CompletedEpochIdentifier), + sdk.NewAttribute(delegationtype.AttributeKeyCompletedEpochNumber, fmt.Sprintf("%d", r.CompletedEpochNumber)), + sdk.NewAttribute(delegationtype.AttributeKeyUndelegationID, fmt.Sprintf("%d", r.UndelegationId)), + sdk.NewAttribute(delegationtype.AttributeKeyTxHash, params.TxHash.String()), + sdk.NewAttribute(delegationtype.AttributeKeyBlockNumber, fmt.Sprintf("%d", r.BlockNumber)), + ), + ) + // call the hooks registered by the other modules - return k.Hooks().AfterUndelegationStarted(ctx, params.OperatorAddress, delegationtype.GetUndelegationRecordKey(r.BlockNumber, r.UndelegationId, r.TxHash, r.OperatorAddr)) + return k.Hooks().AfterUndelegationStarted(ctx, params.OperatorAddress, recordKey) } // AssociateOperatorWithStaker marks that a staker is claiming to be associated with an operator. 
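The delegation hunk above stresses that EventTypeExoAssetDelegation carries only the newly delegated amount, not a running total, so consumers must accumulate it themselves. Below is a minimal indexer-side sketch of that accumulation; only the attribute constants come from the hunk, while the abci event plumbing and the in-memory totals map are illustrative placeholders, not part of this change.

package indexer

import (
	sdkmath "cosmossdk.io/math"

	abci "github.com/cometbft/cometbft/abci/types"

	delegationtype "github.com/ExocoreNetwork/exocore/x/delegation/types"
)

// totals tracks the delegated amount per stakerID|operator pair; a real
// indexer would persist this instead of keeping it in memory.
var totals = map[string]sdkmath.Int{}

func applyExoAssetDelegation(ev abci.Event) {
	var staker, operator, amount string
	for _, attr := range ev.Attributes {
		switch attr.Key {
		case delegationtype.AttributeKeyStakerID:
			staker = attr.Value
		case delegationtype.AttributeKeyOperator:
			operator = attr.Value
		case delegationtype.AttributeKeyAmount:
			amount = attr.Value
		}
	}
	delta, ok := sdkmath.NewIntFromString(amount)
	if !ok {
		return // missing or malformed amount; skip the event
	}
	key := staker + "|" + operator
	prev, found := totals[key]
	if !found {
		prev = sdkmath.ZeroInt()
	}
	totals[key] = prev.Add(delta) // add the delta to the last known total
}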
diff --git a/x/delegation/keeper/delegation_op_test.go b/x/delegation/keeper/delegation_op_test.go index b5f5b2dbb..4ffe2deab 100644 --- a/x/delegation/keeper/delegation_op_test.go +++ b/x/delegation/keeper/delegation_op_test.go @@ -43,7 +43,7 @@ func (suite *DelegationTestSuite) prepareDeposit(depositAmount sdkmath.Int) *ass OpAmount: depositAmount, } depositEvent.AssetsAddress = suite.assetAddr[:] - err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositEvent) + _, err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositEvent) suite.NoError(err) return depositEvent } @@ -62,6 +62,7 @@ func (suite *DelegationTestSuite) prepareDelegation(delegationAmount sdkmath.Int FromAddress: operator.String(), Info: &operatortype.OperatorInfo{ EarningsAddr: operator.String(), + ApproveAddr: operator.String(), }, } _, err := s.OperatorMsgServer.RegisterOperator(s.Ctx, registerReq) @@ -137,6 +138,7 @@ func (suite *DelegationTestSuite) TestDelegateTo() { FromAddress: opAccAddr.String(), Info: &operatortype.OperatorInfo{ EarningsAddr: opAccAddr.String(), + ApproveAddr: opAccAddr.String(), }, } _, err = s.OperatorMsgServer.RegisterOperator(s.Ctx, registerReq) @@ -226,6 +228,7 @@ func (suite *DelegationTestSuite) TestAutoAssociate() { FromAddress: opAccAddr.String(), Info: &operatortype.OperatorInfo{ EarningsAddr: opAccAddr.String(), + ApproveAddr: opAccAddr.String(), }, } _, err := s.OperatorMsgServer.RegisterOperator(s.Ctx, registerReq) diff --git a/x/delegation/keeper/delegation_state.go b/x/delegation/keeper/delegation_state.go index f888fccda..eef7aabca 100644 --- a/x/delegation/keeper/delegation_state.go +++ b/x/delegation/keeper/delegation_state.go @@ -2,6 +2,7 @@ package keeper import ( "fmt" + "slices" errorsmod "cosmossdk.io/errors" sdkmath "cosmossdk.io/math" @@ -35,6 +36,7 @@ func (k Keeper) SetAllDelegationStates(ctx sdk.Context, delegationStates []deleg singleElement := delegationStates[i] bz := k.cdc.MustMarshal(&singleElement.States) store.Set([]byte(singleElement.Key), bz) + // only used at genesis, so no events } return nil } @@ -52,6 +54,7 @@ func (k Keeper) IterateDelegations(ctx sdk.Context, iteratorPrefix []byte, opFun return err } isBreak, err := opFunc(keys, &amounts) + // read-only, so no events if err != nil { return err } @@ -101,6 +104,7 @@ func (k Keeper) TotalDelegatedAmountForStakerAsset(ctx sdk.Context, stakerID str amount = amount.Add(singleAmount) return false, nil } + // read-only, so no event err = k.IterateDelegationsForStakerAndAsset(ctx, stakerID, assetID, opFunc) return amount, err } @@ -121,6 +125,7 @@ func (k *Keeper) AllDelegatedInfoForStakerAsset(ctx sdk.Context, stakerID string if err != nil { return nil, err } + // not used so no event return ret, nil } @@ -172,6 +177,17 @@ func (k Keeper) UpdateDelegationState(ctx sdk.Context, stakerID, assetID, opAddr bz := k.cdc.MustMarshal(&delegationState) store.Set(singleStateKey, bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + delegationtype.EventTypeDelegationStateUpdated, + sdk.NewAttribute(delegationtype.AttributeKeyStakerID, stakerID), + sdk.NewAttribute(delegationtype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(delegationtype.AttributeKeyOperatorAddr, opAddr), + sdk.NewAttribute(delegationtype.AttributeKeyWaitUndelegationAmount, deltaAmounts.WaitUndelegationAmount.String()), + sdk.NewAttribute(delegationtype.AttributeKeyUndelegatableShare, deltaAmounts.UndelegatableShare.String()), + ), + ) + return shareIsZero, nil } @@ -211,14 +227,21 @@ func (k *Keeper) 
AppendStakerForOperator(ctx sdk.Context, operator, assetID, sta if value != nil { k.cdc.MustUnmarshal(value, &stakers) } - for _, v := range stakers.Stakers { - if v == stakerID { - return nil - } + // prefer slices over sdk.SliceContains because we also need to use slices.Index + if slices.Contains(stakers.Stakers, stakerID) { + return nil } stakers.Stakers = append(stakers.Stakers, stakerID) bz := k.cdc.MustMarshal(&stakers) store.Set(Key, bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + delegationtype.EventTypeStakerAppended, + sdk.NewAttribute(delegationtype.AttributeKeyStakerID, stakerID), + sdk.NewAttribute(delegationtype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(delegationtype.AttributeKeyOperatorAddr, operator), + ), + ) return nil } @@ -231,12 +254,20 @@ func (k *Keeper) DeleteStakerForOperator(ctx sdk.Context, operator, assetID, sta } value := store.Get(Key) k.cdc.MustUnmarshal(value, &stakers) - for i, v := range stakers.Stakers { - if v == stakerID { - stakers.Stakers = append(stakers.Stakers[:i], stakers.Stakers[i+1:]...) - break - } - } + index := slices.Index(stakers.Stakers, stakerID) + if index == -1 { + // make no change if the staker is not found + return nil + } + stakers.Stakers = append(stakers.Stakers[:index], stakers.Stakers[index+1:]...) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + delegationtype.EventTypeStakerRemoved, + sdk.NewAttribute(delegationtype.AttributeKeyStakerID, stakerID), + sdk.NewAttribute(delegationtype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(delegationtype.AttributeKeyOperatorAddr, operator), + ), + ) bz := k.cdc.MustMarshal(&stakers) store.Set(Key, bz) return nil @@ -248,6 +279,14 @@ func (k *Keeper) DeleteStakersListForOperator(ctx sdk.Context, operator, assetID if !store.Has(Key) { return delegationtype.ErrNoKeyInTheStore } + ctx.EventManager().EmitEvent( + sdk.NewEvent( + delegationtype.EventTypeAllStakersRemoved, + sdk.NewAttribute(delegationtype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(delegationtype.AttributeKeyOperatorAddr, operator), + ), + ) + store.Delete(Key) return nil } @@ -294,6 +333,7 @@ func (k Keeper) SetAllStakerList(ctx sdk.Context, stakersByOperator []delegation bz := k.cdc.MustMarshal(&delegationtype.StakerList{Stakers: singleElement.Stakers}) store.Set([]byte(singleElement.Key), bz) } + // only used at genesis, so no events return nil } @@ -309,6 +349,16 @@ func (k *Keeper) SetStakerShareToZero(ctx sdk.Context, operator, assetID string, delegationState.UndelegatableShare = sdkmath.LegacyZeroDec() bz := k.cdc.MustMarshal(&delegationState) store.Set(singleStateKey, bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + delegationtype.EventTypeDelegationStateUpdated, + sdk.NewAttribute(delegationtype.AttributeKeyStakerID, stakerID), + sdk.NewAttribute(delegationtype.AttributeKeyAssetID, assetID), + sdk.NewAttribute(delegationtype.AttributeKeyOperatorAddr, operator), + sdk.NewAttribute(delegationtype.AttributeKeyWaitUndelegationAmount, delegationState.WaitUndelegationAmount.String()), + sdk.NewAttribute(delegationtype.AttributeKeyUndelegatableShare, delegationState.UndelegatableShare.String()), + ), + ) } } return nil @@ -357,12 +407,25 @@ func (k *Keeper) SetAssociatedOperator(ctx sdk.Context, stakerID, operatorAddr s } store := prefix.NewStore(ctx.KVStore(k.storeKey), delegationtype.KeyPrefixAssociatedOperatorByStaker) store.Set([]byte(stakerID), []byte(operatorAddr)) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + delegationtype.EventTypeOperatorAssociated, + 
sdk.NewAttribute(delegationtype.AttributeKeyStakerID, stakerID), + sdk.NewAttribute(delegationtype.AttributeKeyOperatorAddr, operatorAddr), + ), + ) return nil } func (k *Keeper) DeleteAssociatedOperator(ctx sdk.Context, stakerID string) error { store := prefix.NewStore(ctx.KVStore(k.storeKey), delegationtype.KeyPrefixAssociatedOperatorByStaker) store.Delete([]byte(stakerID)) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + delegationtype.EventTypeOperatorDisassociated, + sdk.NewAttribute(delegationtype.AttributeKeyStakerID, stakerID), + ), + ) return nil } diff --git a/x/delegation/keeper/share.go b/x/delegation/keeper/share.go index 3e3c64905..e235511d3 100644 --- a/x/delegation/keeper/share.go +++ b/x/delegation/keeper/share.go @@ -225,7 +225,7 @@ func (k Keeper) RemoveShare( if assetID != assetstype.ExocoreAssetID { // todo: TotalDepositAmount might be influenced by slash and precision loss, // consider removing it, it can be recalculated from the share for RPC query. - err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, assetstype.DeltaStakerSingleAsset{ + _, err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, assetstype.DeltaStakerSingleAsset{ PendingUndelegationAmount: removeToken, }) if err != nil { diff --git a/x/delegation/keeper/un_delegation_state.go b/x/delegation/keeper/un_delegation_state.go index bfd34a867..448e8eb4a 100644 --- a/x/delegation/keeper/un_delegation_state.go +++ b/x/delegation/keeper/un_delegation_state.go @@ -99,6 +99,20 @@ func (k *Keeper) DeleteUndelegationRecord(ctx sdk.Context, record *types.Undeleg store := ctx.KVStore(k.storeKey) // delegate on-hold record for the undelegation store.Delete(types.GetUndelegationOnHoldKey(singleRecKey)) + + // emit an event to track the undelegation record identifiers. + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeUndelegationMatured, + // the amount is the only thing that changes from the original record creation time + // technically we are tracking this number via slashing, but best to include it here + // as well. 
+ sdk.NewAttribute(types.AttributeKeyAmount, record.ActualCompletedAmount.String()), + // everything else can be looked up from the original record identifier + sdk.NewAttribute(types.AttributeKeyRecordID, hexutil.Encode(record.GetKey())), + ), + ) + return nil } @@ -340,6 +354,13 @@ func (k Keeper) IncrementUndelegationHoldCount(ctx sdk.Context, recordKey []byte now := prev + 1 store := ctx.KVStore(k.storeKey) store.Set(types.GetUndelegationOnHoldKey(recordKey), sdk.Uint64ToBigEndian(now)) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeUndelegationHoldCountChanged, + sdk.NewAttribute(types.AttributeKeyRecordID, hexutil.Encode(recordKey)), + sdk.NewAttribute(types.AttributeKeyHoldCount, fmt.Sprintf("%d", now)), + ), + ) return nil } @@ -359,6 +380,13 @@ func (k Keeper) DecrementUndelegationHoldCount(ctx sdk.Context, recordKey []byte now := prev - 1 store := ctx.KVStore(k.storeKey) store.Set(types.GetUndelegationOnHoldKey(recordKey), sdk.Uint64ToBigEndian(now)) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeUndelegationHoldCountChanged, + sdk.NewAttribute(types.AttributeKeyRecordID, hexutil.Encode(recordKey)), + sdk.NewAttribute(types.AttributeKeyHoldCount, fmt.Sprintf("%d", now)), + ), + ) return nil } diff --git a/x/delegation/keeper/update_native_restaking_balance.go b/x/delegation/keeper/update_native_restaking_balance.go index f42b67def..f8bb84c6b 100644 --- a/x/delegation/keeper/update_native_restaking_balance.go +++ b/x/delegation/keeper/update_native_restaking_balance.go @@ -22,7 +22,7 @@ func (k Keeper) UpdateNSTBalance( // think this approach is better. In that case, we would proportionally delegate the // increased amount to all operators to whom the related staker has already delegated // this native token. 
- err := k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, types.DeltaStakerSingleAsset{ + _, err := k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, types.DeltaStakerSingleAsset{ TotalDepositAmount: amount, WithdrawableAmount: amount, }) @@ -48,7 +48,7 @@ func (k Keeper) UpdateNSTBalance( } else { pendingSlashAmount = sdkmath.ZeroInt() } - err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, types.DeltaStakerSingleAsset{ + _, err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, types.DeltaStakerSingleAsset{ TotalDepositAmount: slashFromWithdrawable.Neg(), WithdrawableAmount: slashFromWithdrawable.Neg(), }) @@ -67,7 +67,7 @@ func (k Keeper) UpdateNSTBalance( slashAmount = undelegation.ActualCompletedAmount } undelegation.ActualCompletedAmount = undelegation.ActualCompletedAmount.Sub(slashAmount) - err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, types.DeltaStakerSingleAsset{ + _, err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, types.DeltaStakerSingleAsset{ TotalDepositAmount: slashAmount.Neg(), }) if err != nil { @@ -110,7 +110,7 @@ func (k Keeper) UpdateNSTBalance( if err != nil { return true, err } - err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, types.DeltaStakerSingleAsset{ + _, err = k.assetsKeeper.UpdateStakerAssetState(ctx, stakerID, assetID, types.DeltaStakerSingleAsset{ TotalDepositAmount: actualSlashAmount.Neg(), }) if err != nil { diff --git a/x/delegation/module.go b/x/delegation/module.go index 4706c84cb..7696af3d3 100644 --- a/x/delegation/module.go +++ b/x/delegation/module.go @@ -94,13 +94,16 @@ func (am AppModule) WeightedOperations(module.SimulationState) []simtypes.Weight return []simtypes.WeightedOperation{} } -// EndBlock executes all ABCI EndBlock logic respective to the claim module. It -// returns no validator updates. +// EndBlock executes all ABCI EndBlock logic respective to this module. +// It returns no validator updates. func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.ValidatorUpdate { am.keeper.EndBlock(ctx, req) return []abci.ValidatorUpdate{} } +// BeginBlock executes all ABCI BeginBlock logic respective to this module. +func (AppModule) BeginBlock(sdk.Context, abci.RequestBeginBlock) {} + // DefaultGenesis returns a default GenesisState for the module, marshaled to json.RawMessage. 
// The default GenesisState need to be defined by the module developer and is primarily used for // testing diff --git a/x/delegation/types/events.go b/x/delegation/types/events.go new file mode 100644 index 000000000..59bf1a3f0 --- /dev/null +++ b/x/delegation/types/events.go @@ -0,0 +1,43 @@ +package types + +const ( + // delegation of exo native asset, since UpdateStakerAssetState is not called for this case + EventTypeExoAssetDelegation = "exo_asset_delegation" + AttributeKeyOperator = "operator" + AttributeKeyAmount = "amount" + + // delegation state + EventTypeDelegationStateUpdated = "delegation_state_updated" + AttributeKeyStakerID = "staker_id" + AttributeKeyAssetID = "asset_id" + AttributeKeyOperatorAddr = "operator" + AttributeKeyUndelegatableShare = "undelegatable_share" + AttributeKeyWaitUndelegationAmount = "wait_undelegation_amount" + + // operator + asset -> staker + EventTypeStakerAppended = "staker_appended" + EventTypeStakerRemoved = "staker_removed" + EventTypeAllStakersRemoved = "all_stakers_removed" + + // staker operator association + EventTypeOperatorAssociated = "operator_associated" + EventTypeOperatorDisassociated = "operator_disassociated" + + // undelegation + EventTypeUndelegationStarted = "undelegation_started" + AttributeKeyRecordID = "record_id" + AttributeKeyCompletedEpochID = "completed_epoch_id" + AttributeKeyCompletedEpochNumber = "completed_epoch_number" + AttributeKeyUndelegationID = "undelegation_id" + AttributeKeyTxHash = "tx_hash" + AttributeKeyBlockNumber = "block_number" + + // undelegation matured + EventTypeUndelegationMatured = "undelegation_matured" + AttributeKeyWithdrawableAmount = "withdrawable_amount" + AttributeKeyPendingUndelegationAmount = "pending_undelegation_amount" + + // undelegation held back or released + EventTypeUndelegationHoldCountChanged = "undelegation_hold_count_changed" + AttributeKeyHoldCount = "hold_count" +) diff --git a/x/delegation/types/expected_keepers.go b/x/delegation/types/expected_keepers.go index 031c95aaf..80f35cee4 100644 --- a/x/delegation/types/expected_keepers.go +++ b/x/delegation/types/expected_keepers.go @@ -42,7 +42,9 @@ type OperatorKeeper interface { } type AssetsKeeper interface { - UpdateStakerAssetState(ctx sdk.Context, stakerID string, assetID string, changeAmount assetstype.DeltaStakerSingleAsset) (err error) + UpdateStakerAssetState( + ctx sdk.Context, stakerID string, assetID string, changeAmount assetstype.DeltaStakerSingleAsset, + ) (info *assetstype.StakerAssetInfo, err error) UpdateOperatorAssetState(ctx sdk.Context, operatorAddr sdk.Address, assetID string, changeAmount assetstype.DeltaOperatorSingleAsset) (err error) GetStakerSpecifiedAssetInfo(ctx sdk.Context, stakerID string, assetID string) (info *assetstype.StakerAssetInfo, err error) GetOperatorSpecifiedAssetInfo(ctx sdk.Context, operatorAddr sdk.Address, assetID string) (info *assetstype.OperatorAssetInfo, err error) diff --git a/x/delegation/types/keys.go b/x/delegation/types/keys.go index 081765c4c..126bfe553 100644 --- a/x/delegation/types/keys.go +++ b/x/delegation/types/keys.go @@ -119,6 +119,11 @@ func GetUndelegationRecordKey(blockHeight, undelegationID uint64, txHash string, ) } +// GetKey returns the key for the undelegation record +func (r *UndelegationRecord) GetKey() []byte { + return GetUndelegationRecordKey(r.BlockNumber, r.UndelegationId, r.TxHash, r.OperatorAddr) +} + type UndelegationKeyFields struct { BlockHeight uint64 UndelegationID uint64 diff --git a/x/dogfood/client/cli/query.go 
b/x/dogfood/client/cli/query.go index 0749c6d0d..24a0b922d 100644 --- a/x/dogfood/client/cli/query.go +++ b/x/dogfood/client/cli/query.go @@ -4,6 +4,8 @@ import ( "fmt" "strconv" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/spf13/cobra" "github.com/cosmos/cosmos-sdk/client" @@ -32,6 +34,7 @@ func GetQueryCmd(string) *cobra.Command { cmd.AddCommand(CmdQueryUndelegationsToMature()) cmd.AddCommand(CmdQueryUndelegationMaturityEpoch()) cmd.AddCommand(CmdQueryValidator()) + cmd.AddCommand(CmdQueryValidators()) return cmd } @@ -196,8 +199,42 @@ func CmdQueryValidator() *cobra.Command { } queryClient := types.NewQueryClient(clientCtx) address := args[0] - res, err := queryClient.QueryValidator( - cmd.Context(), &types.QueryValidatorRequest{ConsAddr: address}, + consAddress, err := sdk.ConsAddressFromBech32(address) + if err != nil { + return err + } + res, err := queryClient.Validator( + cmd.Context(), &types.QueryValidatorRequest{ConsAddr: consAddress.String()}, + ) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdQueryValidators() *cobra.Command { + cmd := &cobra.Command{ + Use: "validators", + Short: "shows all validators", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + res, err := queryClient.Validators( + cmd.Context(), &types.QueryAllValidatorsRequest{Pagination: pageReq}, ) if err != nil { return err diff --git a/x/dogfood/keeper/abci.go b/x/dogfood/keeper/abci.go index aa6ee3d76..59a435642 100644 --- a/x/dogfood/keeper/abci.go +++ b/x/dogfood/keeper/abci.go @@ -5,6 +5,7 @@ import ( keytypes "github.com/ExocoreNetwork/exocore/types/keys" "github.com/ExocoreNetwork/exocore/utils" avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" + "github.com/ExocoreNetwork/exocore/x/dogfood/types" abci "github.com/cometbft/cometbft/abci/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -12,6 +13,20 @@ import ( func (k Keeper) BeginBlock(ctx sdk.Context) { // for IBC, track historical validator set k.TrackHistoricalInfo(ctx) + // check if event needs to be emitted + if k.ShouldEmitAvsEvent(ctx) { + defer k.ClearEmitAvsEventFlag(ctx) + // emit the event + chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(ctx.ChainID()) + _, avsAddress := k.avsKeeper.IsAVSByChainID(ctx, chainIDWithoutRevision) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeDogfoodAvsCreated, + sdk.NewAttribute(types.AttributeKeyChainIDWithoutRev, chainIDWithoutRevision), + sdk.NewAttribute(types.AttributeKeyAvsAddress, avsAddress), + ), + ) + } } func (k Keeper) EndBlock(ctx sdk.Context) []abci.ValidatorUpdate { diff --git a/x/dogfood/keeper/genesis.go b/x/dogfood/keeper/genesis.go index f7e00f710..1e5da5b40 100644 --- a/x/dogfood/keeper/genesis.go +++ b/x/dogfood/keeper/genesis.go @@ -32,12 +32,13 @@ func (k Keeper) InitGenesis( // the staking assets are validated during AVS registration so we skip it here k.SetParams(ctx, genState.Params) // create the AVS + var exists bool var avsAddr common.Address var err error // the avs module will remove the revision by itself, but we do it here anyway because we need it // to look up operator registration status after this - which is keyed by chainID without revision. 
chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(ctx.ChainID()) - if avsAddr, err = k.avsKeeper.RegisterAVSWithChainID(ctx, &avstypes.AVSRegisterOrDeregisterParams{ + if exists, avsAddr, err = k.avsKeeper.RegisterAVSWithChainID(ctx, &avstypes.AVSRegisterOrDeregisterParams{ AvsName: chainIDWithoutRevision, AssetIDs: genState.Params.AssetIDs, UnbondingPeriod: uint64(genState.Params.EpochsUntilUnbonded), @@ -47,12 +48,18 @@ func (k Keeper) InitGenesis( }); err != nil { panic(fmt.Errorf("could not create the dogfood AVS: %s", err)) } - avsAddrString := avsAddr.String() k.Logger(ctx).Info( "created dogfood avs", - "avsAddrString", avsAddrString, + "avsAddrString", avsAddr.String(), "chainIDWithoutRevision", chainIDWithoutRevision, ) + if !exists { + // defer an event for BeginBlock, since InitGenesis events are discarded. + // this is unique to x/dogfood, since other modules do not initialize AVSs in InitGenesis, + // unless that AVS already exists in genesis.json, in which case it is picked up by the + // indexer directly anyway. + k.MarkEmitAvsEventFlag(ctx) + } // create the validators out := make([]keytypes.WrappedConsKeyWithPower, 0, len(genState.ValSet)) for _, val := range genState.ValSet { diff --git a/x/dogfood/keeper/impl_epochs_hooks_test.go b/x/dogfood/keeper/impl_epochs_hooks_test.go index 7cec7dfbf..707e545bd 100644 --- a/x/dogfood/keeper/impl_epochs_hooks_test.go +++ b/x/dogfood/keeper/impl_epochs_hooks_test.go @@ -24,6 +24,7 @@ func (suite *KeeperTestSuite) TestSameEpochOperations() { FromAddress: operatorAddressString, Info: &operatortypes.OperatorInfo{ EarningsAddr: operatorAddressString, + ApproveAddr: operatorAddressString, }, } _, err := suite.OperatorMsgServer.RegisterOperator( @@ -50,7 +51,7 @@ func (suite *KeeperTestSuite) TestSameEpochOperations() { AssetsAddress: assetAddr.Bytes(), OpAmount: amount, } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) // delegate delegationParams := &delegationtypes.DelegationOrUndelegationParams{ @@ -203,6 +204,7 @@ func (suite *KeeperTestSuite) TestDifferentEpochOperations() { FromAddress: operatorAddressString, Info: &operatortypes.OperatorInfo{ EarningsAddr: operatorAddressString, + ApproveAddr: operatorAddressString, }, } _, err := suite.OperatorMsgServer.RegisterOperator( @@ -229,7 +231,7 @@ func (suite *KeeperTestSuite) TestDifferentEpochOperations() { AssetsAddress: assetAddr.Bytes(), OpAmount: amount, } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) // delegate delegationParams := &delegationtypes.DelegationOrUndelegationParams{ diff --git a/x/dogfood/keeper/keeper.go b/x/dogfood/keeper/keeper.go index 42941549c..bc6bcced7 100644 --- a/x/dogfood/keeper/keeper.go +++ b/x/dogfood/keeper/keeper.go @@ -110,6 +110,27 @@ func (k Keeper) ClearValidatorSetUpdateFlag(ctx sdk.Context) { store.Delete(key) } +// MarkEmitAvsEventFlag marks that an AVS event should be emitted in the BeginBlocker. +func (k Keeper) MarkEmitAvsEventFlag(ctx sdk.Context) { + store := ctx.KVStore(k.storeKey) + key := types.EmitAvsEventKey() + store.Set(key, []byte{1}) +} + +// ShouldEmitAvsEvent returns true if an AVS event should be emitted in the BeginBlocker. 
+func (k Keeper) ShouldEmitAvsEvent(ctx sdk.Context) bool { + store := ctx.KVStore(k.storeKey) + key := types.EmitAvsEventKey() + return store.Has(key) +} + +// ClearEmitAvsEventFlag clears the AVS event marker. It is called after the AVS event is emitted. +func (k Keeper) ClearEmitAvsEventFlag(ctx sdk.Context) { + store := ctx.KVStore(k.storeKey) + key := types.EmitAvsEventKey() + store.Delete(key) +} + func (k Keeper) mustValidateFields() { types.PanicIfNil(k.storeKey, "storeKey") types.PanicIfNil(k.cdc, "cdc") diff --git a/x/dogfood/keeper/msg_server.go b/x/dogfood/keeper/msg_server.go index 655aac6e1..51b4f6a3e 100644 --- a/x/dogfood/keeper/msg_server.go +++ b/x/dogfood/keeper/msg_server.go @@ -10,7 +10,6 @@ import ( "github.com/ExocoreNetwork/exocore/utils" avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" "github.com/ExocoreNetwork/exocore/x/dogfood/types" - epochstypes "github.com/ExocoreNetwork/exocore/x/epochs/types" sdk "github.com/cosmos/cosmos-sdk/types" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" ) @@ -52,6 +51,7 @@ func (k Keeper) UpdateParams( "UpdateParams", "overriding EpochsUntilUnbonded with value", prevParams.EpochsUntilUnbonded, ) + // any changes to this param will not affect existing undelegations nextParams.EpochsUntilUnbonded = prevParams.EpochsUntilUnbonded } if nextParams.MaxValidators == 0 { @@ -61,9 +61,8 @@ func (k Keeper) UpdateParams( ) nextParams.MaxValidators = prevParams.MaxValidators } - if err := epochstypes.ValidateEpochIdentifierInterface( - nextParams.EpochIdentifier, - ); err != nil { + // forbid editing the epoch + if nextParams.EpochIdentifier != prevParams.EpochIdentifier { logger.Info( "UpdateParams", "overriding EpochIdentifier with value", prevParams.EpochIdentifier, @@ -92,13 +91,7 @@ func (k Keeper) UpdateParams( nextParams.MinSelfDelegation = prevParams.MinSelfDelegation } // now do stateful validations - if _, found := k.epochsKeeper.GetEpochInfo(c, nextParams.EpochIdentifier); !found { - logger.Info( - "UpdateParams", - "overriding EpochIdentifier with value", prevParams.EpochIdentifier, - ) - nextParams.EpochIdentifier = prevParams.EpochIdentifier - } + // no need to validate the epoch identifier, since it is prohibited to change that. override := false for _, assetID := range nextParams.AssetIDs { if !k.restakingKeeper.IsStakingAsset(c, strings.ToLower(assetID)) { diff --git a/x/dogfood/keeper/opt_out.go b/x/dogfood/keeper/opt_out.go index 7f0adb5b8..8fdc1003e 100644 --- a/x/dogfood/keeper/opt_out.go +++ b/x/dogfood/keeper/opt_out.go @@ -1,6 +1,8 @@ package keeper import ( + "strconv" + "github.com/ExocoreNetwork/exocore/x/dogfood/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -18,12 +20,21 @@ func (k Keeper) SetOptOutInformation( // AppendOptOutToFinish appends an operator address to the list of operator addresses that have // opted out and will be finished at the end of the provided epoch. +// The caller must ensure that the operator address is not already in the list. 
func (k Keeper) AppendOptOutToFinish( ctx sdk.Context, epoch int64, operatorAddr sdk.AccAddress, ) { prev := k.GetOptOutsToFinish(ctx, epoch) next := types.AccountAddresses{List: append(prev, operatorAddr)} k.setOptOutsToFinish(ctx, epoch, next) + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeOptOutBegan, + sdk.NewAttribute(types.AttributeKeyEpoch, strconv.FormatInt(epoch, 10)), + sdk.NewAttribute(types.AttributeKeyOperator, operatorAddr.String()), + ), + ) } // GetOptOutsToFinish returns the list of operator addresses that have opted out and will be @@ -65,6 +76,12 @@ func (k Keeper) ClearOptOutsToFinish(ctx sdk.Context, epoch int64) { store := ctx.KVStore(k.storeKey) key, _ := types.OptOutsToFinishKey(epoch) store.Delete(key) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeOptOutsFinished, + sdk.NewAttribute(types.AttributeKeyEpoch, strconv.FormatInt(epoch, 10)), + ), + ) } // GetAllOptOutsToFinish gets a list of epochs and the corresponding operator addresses @@ -139,6 +156,13 @@ func (k Keeper) AppendConsensusAddrToPrune( prev := k.GetConsensusAddrsToPrune(ctx, epoch) next := types.ConsensusAddresses{List: append(prev, operatorAddr)} k.setConsensusAddrsToPrune(ctx, epoch, next) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeConsAddrPruningScheduled, + sdk.NewAttribute(types.AttributeKeyEpoch, strconv.FormatInt(epoch, 10)), + sdk.NewAttribute(types.AttributeKeyConsAddr, operatorAddr.String()), + ), + ) } // GetConsensusAddrsToPrune returns the list of consensus addresses to prune at the end of the @@ -165,6 +189,12 @@ func (k Keeper) ClearConsensusAddrsToPrune(ctx sdk.Context, epoch int64) { store := ctx.KVStore(k.storeKey) key, _ := types.ConsensusAddrsToPruneKey(epoch) store.Delete(key) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeConsAddrsPruned, + sdk.NewAttribute(types.AttributeKeyEpoch, strconv.FormatInt(epoch, 10)), + ), + ) } // setConsensusAddrsToPrune sets the list of consensus addresses to prune at the end of the diff --git a/x/dogfood/keeper/opt_out_test.go b/x/dogfood/keeper/opt_out_test.go index 7f84a32c9..0fd94d1e8 100644 --- a/x/dogfood/keeper/opt_out_test.go +++ b/x/dogfood/keeper/opt_out_test.go @@ -23,6 +23,7 @@ func (suite *KeeperTestSuite) TestBasicOperations() { FromAddress: operatorAddressString, Info: &operatortypes.OperatorInfo{ EarningsAddr: operatorAddressString, + ApproveAddr: operatorAddressString, }, } _, err := suite.OperatorMsgServer.RegisterOperator(sdk.WrapSDKContext(suite.Ctx), registerReq) @@ -64,7 +65,7 @@ func (suite *KeeperTestSuite) TestBasicOperations() { AssetsAddress: assetAddr.Bytes(), OpAmount: amount, } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) suite.CheckLengthOfValidatorUpdates(0, nil, "deposit but don't delegate") // then delegate it @@ -94,7 +95,7 @@ func (suite *KeeperTestSuite) TestBasicOperations() { AssetsAddress: assetAddr.Bytes(), OpAmount: additionalAmount, } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) suite.CheckLengthOfValidatorUpdates(0, nil, "deposit above min but don't delegate") delegationParams = &delegationtypes.DelegationOrUndelegationParams{ @@ -134,7 +135,7 @@ func (suite *KeeperTestSuite) TestBasicOperations() { AssetsAddress: assetAddr.Bytes(), OpAmount: amount, } 
- err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) suite.CheckLengthOfValidatorUpdates(0, nil, "deposit (non-self) but don't delegate") delegationParams = &delegationtypes.DelegationOrUndelegationParams{ @@ -168,7 +169,7 @@ func (suite *KeeperTestSuite) TestBasicOperations() { AssetsAddress: assetAddr.Bytes(), OpAmount: amount, } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) delegationParams = &delegationtypes.DelegationOrUndelegationParams{ ClientChainID: lzID, @@ -203,7 +204,7 @@ func (suite *KeeperTestSuite) TestBasicOperations() { AssetsAddress: assetAddr.Bytes(), OpAmount: amount, } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) delegationParams = &delegationtypes.DelegationOrUndelegationParams{ ClientChainID: lzID, diff --git a/x/dogfood/keeper/query.go b/x/dogfood/keeper/query.go index 0208d7632..a9acfa3e2 100644 --- a/x/dogfood/keeper/query.go +++ b/x/dogfood/keeper/query.go @@ -3,6 +3,9 @@ package keeper import ( "context" + "github.com/cosmos/cosmos-sdk/store/prefix" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/ExocoreNetwork/exocore/x/dogfood/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -10,9 +13,17 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) -var _ types.QueryServer = Keeper{} +type Querier struct { + Keeper +} + +var _ types.QueryServer = &Querier{} + +func NewQueryServer(keeper Keeper) types.QueryServer { + return &Querier{Keeper: keeper} +} -func (k Keeper) Params( +func (q Querier) Params( goCtx context.Context, req *types.QueryParamsRequest, ) (*types.QueryParamsResponse, error) { @@ -21,10 +32,10 @@ func (k Keeper) Params( } ctx := sdk.UnwrapSDKContext(goCtx) - return &types.QueryParamsResponse{Params: k.GetDogfoodParams(ctx)}, nil + return &types.QueryParamsResponse{Params: q.Keeper.GetDogfoodParams(ctx)}, nil } -func (k Keeper) OptOutsToFinish( +func (q Querier) OptOutsToFinish( goCtx context.Context, req *types.QueryOptOutsToFinishRequest, ) (*types.AccountAddresses, error) { @@ -34,12 +45,12 @@ func (k Keeper) OptOutsToFinish( ctx := sdk.UnwrapSDKContext(goCtx) epoch := req.Epoch - addresses := k.GetOptOutsToFinish(ctx, epoch) + addresses := q.Keeper.GetOptOutsToFinish(ctx, epoch) // TODO: consider converting this to a slice of strings? 
return &types.AccountAddresses{List: addresses}, nil } -func (k Keeper) OperatorOptOutFinishEpoch( +func (q Querier) OperatorOptOutFinishEpoch( goCtx context.Context, req *types.QueryOperatorOptOutFinishEpochRequest, ) (*types.QueryOperatorOptOutFinishEpochResponse, error) { @@ -51,11 +62,11 @@ func (k Keeper) OperatorOptOutFinishEpoch( if err != nil { return nil, status.Error(codes.InvalidArgument, "invalid operator address") } - epoch := k.GetOperatorOptOutFinishEpoch(ctx, accAddr) + epoch := q.Keeper.GetOperatorOptOutFinishEpoch(ctx, accAddr) return &types.QueryOperatorOptOutFinishEpochResponse{Epoch: epoch}, nil } -func (k Keeper) UndelegationsToMature( +func (q Querier) UndelegationsToMature( goCtx context.Context, req *types.QueryUndelegationsToMatureRequest, ) (*types.UndelegationRecordKeys, error) { @@ -64,11 +75,11 @@ func (k Keeper) UndelegationsToMature( } ctx := sdk.UnwrapSDKContext(goCtx) epoch := req.Epoch - keys := k.GetUndelegationsToMature(ctx, epoch) + keys := q.Keeper.GetUndelegationsToMature(ctx, epoch) return &types.UndelegationRecordKeys{List: keys}, nil } -func (k Keeper) UndelegationMaturityEpoch( +func (q Querier) UndelegationMaturityEpoch( goCtx context.Context, req *types.QueryUndelegationMaturityEpochRequest, ) (*types.QueryUndelegationMaturityEpochResponse, error) { @@ -76,17 +87,17 @@ func (k Keeper) UndelegationMaturityEpoch( return nil, status.Error(codes.InvalidArgument, "invalid request") } ctx := sdk.UnwrapSDKContext(goCtx) - epoch, found := k.GetUndelegationMaturityEpoch(ctx, []byte(req.RecordKey)) + epoch, found := q.Keeper.GetUndelegationMaturityEpoch(ctx, []byte(req.RecordKey)) if !found { return nil, status.Error(codes.NotFound, "undelegation record not found") } return &types.QueryUndelegationMaturityEpochResponse{Epoch: epoch}, nil } -func (k Keeper) QueryValidator( +func (q Querier) Validator( goCtx context.Context, req *types.QueryValidatorRequest, -) (*types.ExocoreValidator, error) { +) (*types.QueryValidatorResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") } @@ -96,9 +107,40 @@ func (k Keeper) QueryValidator( if err != nil { return nil, status.Error(codes.InvalidArgument, "invalid consensus address") } - validator, found := k.GetExocoreValidator(ctx, consAddressBytes) + validator, found := q.Keeper.GetExocoreValidator(ctx, consAddressBytes) if !found { return nil, status.Error(codes.NotFound, "validator not found") } - return &validator, nil + return &types.QueryValidatorResponse{Validator: &validator}, nil +} + +func (q Querier) Validators( + goCtx context.Context, + req *types.QueryAllValidatorsRequest, +) (*types.QueryAllValidatorsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(goCtx) + + store := ctx.KVStore(q.Keeper.storeKey) + valStore := prefix.NewStore(store, []byte{types.ExocoreValidatorBytePrefix}) + + validators, pageRes, err := query.GenericFilteredPaginate( + q.Keeper.cdc, valStore, req.Pagination, func(_ []byte, val *types.ExocoreValidator) (*types.ExocoreValidator, error) { + return val, nil + }, func() *types.ExocoreValidator { + return &types.ExocoreValidator{} + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + // convert pointer to value + vals := make([]types.ExocoreValidator, len(validators)) + for i, val := range validators { + vals[i] = *val + } + + return &types.QueryAllValidatorsResponse{Validators: vals, Pagination: pageRes}, nil } diff --git 
a/x/dogfood/keeper/unbonding.go b/x/dogfood/keeper/unbonding.go index 9ceba5264..934fb7f9b 100644 --- a/x/dogfood/keeper/unbonding.go +++ b/x/dogfood/keeper/unbonding.go @@ -1,6 +1,8 @@ package keeper import ( + "strconv" + "github.com/ExocoreNetwork/exocore/x/dogfood/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/ethereum/go-ethereum/common/hexutil" @@ -32,6 +34,14 @@ func (k Keeper) AppendUndelegationToMature( List: append(prev, recordKey), } k.setUndelegationsToMature(ctx, epoch, next) + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeUndelegationMaturityScheduled, + sdk.NewAttribute(types.AttributeKeyEpoch, strconv.FormatInt(epoch, 10)), + sdk.NewAttribute(types.AttributeKeyRecordID, hexutil.Encode(recordKey)), + ), + ) } // GetUndelegationsToMature returns all undelegation entries that should be released @@ -61,6 +71,12 @@ func (k Keeper) ClearUndelegationsToMature( store := ctx.KVStore(k.storeKey) key, _ := types.UnbondingReleaseMaturityKey(epoch) store.Delete(key) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeUndelegationsMatured, + sdk.NewAttribute(types.AttributeKeyEpoch, strconv.FormatInt(epoch, 10)), + ), + ) } // setUndelegationsToMature sets all undelegation entries that should be released diff --git a/x/dogfood/keeper/unbonding_test.go b/x/dogfood/keeper/unbonding_test.go index c176b689b..86c342115 100644 --- a/x/dogfood/keeper/unbonding_test.go +++ b/x/dogfood/keeper/unbonding_test.go @@ -21,6 +21,7 @@ func (suite *KeeperTestSuite) TestUndelegations() { FromAddress: operatorAddressString, Info: &operatortypes.OperatorInfo{ EarningsAddr: operatorAddressString, + ApproveAddr: operatorAddressString, }, } _, err := suite.OperatorMsgServer.RegisterOperator( @@ -47,7 +48,7 @@ func (suite *KeeperTestSuite) TestUndelegations() { AssetsAddress: assetAddr.Bytes(), OpAmount: amount, } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) // delegate delegationParams := &delegationtypes.DelegationOrUndelegationParams{ @@ -157,6 +158,7 @@ func (suite *KeeperTestSuite) TestUndelegationEdgeCases() { FromAddress: operatorAddressString, Info: &operatortypes.OperatorInfo{ EarningsAddr: operatorAddressString, + ApproveAddr: operatorAddressString, }, } _, err := suite.OperatorMsgServer.RegisterOperator( @@ -184,7 +186,7 @@ func (suite *KeeperTestSuite) TestUndelegationEdgeCases() { AssetsAddress: assetAddr.Bytes(), OpAmount: amount.Mul(sdkmath.NewInt(5)), } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) // delegate delegationParams := &delegationtypes.DelegationOrUndelegationParams{ diff --git a/x/dogfood/keeper/validators.go b/x/dogfood/keeper/validators.go index e9b223013..05a6dbbf1 100644 --- a/x/dogfood/keeper/validators.go +++ b/x/dogfood/keeper/validators.go @@ -322,6 +322,12 @@ func (k Keeper) SetLastTotalPower(ctx sdk.Context, power math.Int) { store := ctx.KVStore(k.storeKey) bz := k.cdc.MustMarshal(&sdk.IntProto{Int: power}) store.Set(types.LastTotalPowerKey(), bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeLastTotalPowerUpdated, + sdk.NewAttribute(types.AttributeKeyLastTotalPower, power.String()), + ), + ) } // SetValidatorUpdates sets the ABCI validator power updates for the current block. 
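A note on the deferred AVS event added in x/dogfood above: events emitted during InitGenesis are discarded, so genesis.go only persists a one-byte flag (MarkEmitAvsEventFlag) and the next BeginBlock emits dogfood_avs_created and clears the flag. The sketch below restates that pattern with hypothetical Store and EventEmitter interfaces standing in for the real KVStore and EventManager; it is illustrative, not the keeper code.

```go
package deferredevent

// Store and EventEmitter are hypothetical stand-ins for the Cosmos SDK
// KVStore and EventManager; this is a sketch of the pattern, not the real
// keeper code in this diff.
type Store interface {
	Set(key, value []byte)
	Has(key []byte) bool
	Delete(key []byte)
}

type EventEmitter interface {
	EmitEvent(eventType string, attrs map[string]string)
}

// emitAvsEventKey is a placeholder; the real store key is built from
// EmitAvsEventByte in x/dogfood/types/keys.go.
var emitAvsEventKey = []byte{0xEE}

// MarkAtGenesis is called from InitGenesis, where emitted events are
// discarded, so only the flag is written.
func MarkAtGenesis(store Store) {
	store.Set(emitAvsEventKey, []byte{1})
}

// EmitInBeginBlock runs on every BeginBlock; it emits the deferred event at
// most once and then clears the flag so subsequent blocks do nothing.
func EmitInBeginBlock(store Store, em EventEmitter, chainID, avsAddress string) {
	if !store.Has(emitAvsEventKey) {
		return
	}
	defer store.Delete(emitAvsEventKey)
	em.EmitEvent("dogfood_avs_created", map[string]string{
		"chain_id":    chainID,
		"avs_address": avsAddress,
	})
}
```

Clearing the flag via defer, as abci.go does, preserves the emit-once guarantee even if more logic is later added between the check and the emit.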
diff --git a/x/dogfood/module.go b/x/dogfood/module.go index e0dd687c0..d4604636e 100644 --- a/x/dogfood/module.go +++ b/x/dogfood/module.go @@ -129,7 +129,7 @@ func NewAppModule( // queries func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) - types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + types.RegisterQueryServer(cfg.QueryServer(), keeper.NewQueryServer(am.keeper)) } // RegisterInvariants registers the invariants of the module. If an invariant deviates from its diff --git a/x/dogfood/types/events.go b/x/dogfood/types/events.go new file mode 100644 index 000000000..476cd204a --- /dev/null +++ b/x/dogfood/types/events.go @@ -0,0 +1,32 @@ +package types + +// x/dogfood events +const ( + // since the AVS is created at genesis, we manually emit this event. + EventTypeDogfoodAvsCreated = "dogfood_avs_created" + AttributeKeyChainIDWithoutRev = "chain_id" + AttributeKeyAvsAddress = "avs_address" + + // emitted when the last total power is set, which also implies that + // the validator set has changed. + EventTypeLastTotalPowerUpdated = "last_total_power_updated" + AttributeKeyLastTotalPower = "last_total_power" + + // emitted when an operator opts out and will be finished at the end of the provided epoch. + EventTypeOptOutBegan = "opt_out_began" + AttributeKeyEpoch = "epoch" + AttributeKeyOperator = "operator" + EventTypeOptOutsFinished = "opt_outs_finished" + + // emitted when a consensus address is added to the list of consensus addresses to prune + // at the end of the epoch. + EventTypeConsAddrPruningScheduled = "cons_addr_pruning_scheduled" + AttributeKeyConsAddr = "cons_addr" + EventTypeConsAddrsPruned = "cons_addrs_pruned" + + // emitted when an undelegation is added to the list of undelegations to mature + // at the end of the epoch. + EventTypeUndelegationMaturityScheduled = "undelegation_maturity_scheduled" + AttributeKeyRecordID = "record_id" + EventTypeUndelegationsMatured = "undelegations_matured" +) diff --git a/x/dogfood/types/expected_keepers.go b/x/dogfood/types/expected_keepers.go index 454aebddf..2979af6fc 100644 --- a/x/dogfood/types/expected_keepers.go +++ b/x/dogfood/types/expected_keepers.go @@ -99,7 +99,7 @@ type AssetsKeeper interface { } type AVSKeeper interface { - RegisterAVSWithChainID(sdk.Context, *avstypes.AVSRegisterOrDeregisterParams) (common.Address, error) + RegisterAVSWithChainID(sdk.Context, *avstypes.AVSRegisterOrDeregisterParams) (bool, common.Address, error) IsAVSByChainID(ctx sdk.Context, chainID string) (bool, string) GetAVSSupportedAssets(ctx sdk.Context, avsAddr string) (map[string]interface{}, error) UpdateAVSInfo(ctx sdk.Context, params *avstypes.AVSRegisterOrDeregisterParams) error diff --git a/x/dogfood/types/keys.go b/x/dogfood/types/keys.go index 20528d2c4..ded7567e1 100644 --- a/x/dogfood/types/keys.go +++ b/x/dogfood/types/keys.go @@ -75,6 +75,9 @@ const ( // ParamsByte is the single-byte key for the params store. ParamsByte + + // EmitAvsEventByte is the single-byte key for the emit AVS event store. + EmitAvsEventByte ) // ExocoreValidatorKey returns the key for the validator store. @@ -198,3 +201,8 @@ func ValidatorUpdatesKey() []byte { func ParamsKey() []byte { return []byte{ParamsByte} } + +// EmitAvsEventKey returns the key for the emit AVS event store.
+func EmitAvsEventKey() []byte { + return []byte{EmitAvsEventByte} +} diff --git a/x/dogfood/types/query.pb.go b/x/dogfood/types/query.pb.go index e82841483..9abe0f93b 100644 --- a/x/dogfood/types/query.pb.go +++ b/x/dogfood/types/query.pb.go @@ -6,6 +6,7 @@ package types import ( context "context" fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" _ "github.com/cosmos/gogoproto/gogoproto" grpc1 "github.com/cosmos/gogoproto/grpc" proto "github.com/cosmos/gogoproto/proto" @@ -397,7 +398,7 @@ func (m *QueryUndelegationMaturityEpochResponse) GetEpoch() int64 { return 0 } -// QueryValidatorRequest is request type for the Query/QueryValidator RPC method. +// QueryValidatorRequest is request type for the Query/Validator RPC method. type QueryValidatorRequest struct { // cons_addr is the consensus address of the validator being queried. From the perspective of // this module, the acc_addr is not relevant and is thus not stored. @@ -444,6 +445,153 @@ func (m *QueryValidatorRequest) GetConsAddr() string { return "" } +// QueryValidatorResponse is response type for the Query/Validator RPC method. +type QueryValidatorResponse struct { + // validator is the validator being queried. + Validator *ExocoreValidator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator,omitempty"` +} + +func (m *QueryValidatorResponse) Reset() { *m = QueryValidatorResponse{} } +func (m *QueryValidatorResponse) String() string { return proto.CompactTextString(m) } +func (*QueryValidatorResponse) ProtoMessage() {} +func (*QueryValidatorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e23d51a3dceb1c68, []int{9} +} +func (m *QueryValidatorResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryValidatorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryValidatorResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryValidatorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryValidatorResponse.Merge(m, src) +} +func (m *QueryValidatorResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryValidatorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryValidatorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryValidatorResponse proto.InternalMessageInfo + +func (m *QueryValidatorResponse) GetValidator() *ExocoreValidator { + if m != nil { + return m.Validator + } + return nil +} + +// QueryAllValidatorsRequest is request type for the Query/AllValidators RPC method. +type QueryAllValidatorsRequest struct { + // pagination defines an optional pagination for the request. 
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllValidatorsRequest) Reset() { *m = QueryAllValidatorsRequest{} } +func (m *QueryAllValidatorsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllValidatorsRequest) ProtoMessage() {} +func (*QueryAllValidatorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e23d51a3dceb1c68, []int{10} +} +func (m *QueryAllValidatorsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllValidatorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllValidatorsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllValidatorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllValidatorsRequest.Merge(m, src) +} +func (m *QueryAllValidatorsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllValidatorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllValidatorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllValidatorsRequest proto.InternalMessageInfo + +func (m *QueryAllValidatorsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryAllValidatorsResponse is response type for the Query/AllValidators RPC method. +type QueryAllValidatorsResponse struct { + // validators is the list of all validators. + Validators []ExocoreValidator `protobuf:"bytes,1,rep,name=validators,proto3" json:"validators"` + // pagination defines the pagination in the response. + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllValidatorsResponse) Reset() { *m = QueryAllValidatorsResponse{} } +func (m *QueryAllValidatorsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllValidatorsResponse) ProtoMessage() {} +func (*QueryAllValidatorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e23d51a3dceb1c68, []int{11} +} +func (m *QueryAllValidatorsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllValidatorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllValidatorsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllValidatorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllValidatorsResponse.Merge(m, src) +} +func (m *QueryAllValidatorsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllValidatorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllValidatorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllValidatorsResponse proto.InternalMessageInfo + +func (m *QueryAllValidatorsResponse) GetValidators() []ExocoreValidator { + if m != nil { + return m.Validators + } + return nil +} + +func (m *QueryAllValidatorsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + func init() { proto.RegisterType((*QueryParamsRequest)(nil), "exocore.dogfood.v1.QueryParamsRequest") proto.RegisterType((*QueryParamsResponse)(nil), "exocore.dogfood.v1.QueryParamsResponse") @@ -454,55 +602,69 @@ 
func init() { proto.RegisterType((*QueryUndelegationMaturityEpochRequest)(nil), "exocore.dogfood.v1.QueryUndelegationMaturityEpochRequest") proto.RegisterType((*QueryUndelegationMaturityEpochResponse)(nil), "exocore.dogfood.v1.QueryUndelegationMaturityEpochResponse") proto.RegisterType((*QueryValidatorRequest)(nil), "exocore.dogfood.v1.QueryValidatorRequest") + proto.RegisterType((*QueryValidatorResponse)(nil), "exocore.dogfood.v1.QueryValidatorResponse") + proto.RegisterType((*QueryAllValidatorsRequest)(nil), "exocore.dogfood.v1.QueryAllValidatorsRequest") + proto.RegisterType((*QueryAllValidatorsResponse)(nil), "exocore.dogfood.v1.QueryAllValidatorsResponse") } func init() { proto.RegisterFile("exocore/dogfood/v1/query.proto", fileDescriptor_e23d51a3dceb1c68) } var fileDescriptor_e23d51a3dceb1c68 = []byte{ - // 682 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xd1, 0x4e, 0xd4, 0x4c, - 0x14, 0xde, 0xfe, 0xbf, 0x10, 0x18, 0x13, 0x89, 0x23, 0x24, 0x52, 0xb0, 0x60, 0xa3, 0x28, 0x98, - 0x74, 0x5c, 0x10, 0x23, 0x6a, 0x48, 0x20, 0x82, 0x17, 0x04, 0xd1, 0x15, 0xbd, 0xf0, 0xa6, 0x29, - 0xed, 0x50, 0x1a, 0xa0, 0xa7, 0xcc, 0x4c, 0x91, 0x0d, 0xd9, 0x1b, 0x9f, 0xc0, 0x44, 0x1f, 0xc0, - 0xb7, 0x30, 0xbe, 0x01, 0x97, 0x24, 0xde, 0x70, 0x65, 0x14, 0x7c, 0x10, 0xd3, 0xe9, 0x74, 0x5d, - 0xd8, 0xd9, 0x6e, 0xe2, 0x5d, 0x77, 0xe6, 0x7c, 0xe7, 0x7c, 0xdf, 0x39, 0xe7, 0x9b, 0x45, 0x16, - 0x3d, 0x00, 0x1f, 0x18, 0x25, 0x01, 0x84, 0x9b, 0x00, 0x01, 0xd9, 0xaf, 0x92, 0xbd, 0x94, 0xb2, - 0xba, 0x93, 0x30, 0x10, 0x80, 0xb1, 0xba, 0x77, 0xd4, 0xbd, 0xb3, 0x5f, 0x35, 0xc7, 0x35, 0x98, - 0xe2, 0x5a, 0xa2, 0xcc, 0x31, 0x4d, 0x44, 0xe2, 0x31, 0x6f, 0x97, 0xab, 0x80, 0xc1, 0x10, 0x42, - 0x90, 0x9f, 0x24, 0xfb, 0x52, 0xa7, 0xa3, 0x21, 0x40, 0xb8, 0x43, 0x89, 0x97, 0x44, 0xc4, 0x8b, - 0x63, 0x10, 0x9e, 0x88, 0x20, 0x56, 0x18, 0x7b, 0x10, 0xe1, 0x57, 0x19, 0xb3, 0x97, 0x32, 0x51, - 0x8d, 0xee, 0xa5, 0x94, 0x0b, 0x7b, 0x0d, 0x5d, 0x3b, 0x77, 0xca, 0x13, 0x88, 0x39, 0xc5, 0x8f, - 0x50, 0x6f, 0x5e, 0xf0, 0xba, 0x31, 0x6e, 0xdc, 0xbd, 0x3c, 0x6d, 0x3a, 0xed, 0x42, 0x9c, 0x1c, - 0xb3, 0x78, 0xe9, 0xe8, 0xc7, 0x58, 0xa5, 0xa6, 0xe2, 0xed, 0x19, 0x34, 0x22, 0x13, 0xae, 0x25, - 0x62, 0x2d, 0x15, 0x7c, 0x1d, 0x96, 0xa3, 0x38, 0xe2, 0x5b, 0xaa, 0x1e, 0x1e, 0x44, 0x3d, 0x34, - 0x01, 0x7f, 0x4b, 0xe6, 0xfd, 0xbf, 0x96, 0xff, 0xb0, 0x5f, 0xa3, 0xdb, 0x0a, 0x44, 0x99, 0x27, - 0x80, 0xe5, 0xe0, 0x1c, 0xb9, 0x94, 0x45, 0x14, 0xf0, 0x29, 0x74, 0x15, 0x54, 0x8c, 0xeb, 0xf9, - 0xbe, 0xeb, 0x05, 0x01, 0x93, 0xa9, 0xfa, 0x6b, 0x03, 0xc5, 0xc5, 0x82, 0xef, 0x2f, 0x04, 0x01, - 0xb3, 0xe7, 0xd1, 0x44, 0xb7, 0xa4, 0x4a, 0xad, 0x9e, 0xd4, 0x1c, 0xba, 0x29, 0xf1, 0x6f, 0xe2, - 0x80, 0xee, 0xd0, 0x30, 0x6f, 0xe6, 0x3a, 0xac, 0x7a, 0x22, 0x65, 0xb4, 0x5c, 0xcf, 0xb2, 0xd2, - 0xd3, 0x0a, 0x95, 0xb8, 0x48, 0xd4, 0xcf, 0xe9, 0xb9, 0x81, 0x10, 0xa3, 0x3e, 0xb0, 0xc0, 0xdd, - 0xa6, 0x75, 0x25, 0xa4, 0x3f, 0x3f, 0x59, 0xa1, 0xf5, 0xa6, 0x84, 0x92, 0x3c, 0xa5, 0x12, 0x1e, - 0xa0, 0x21, 0x89, 0x7f, 0xeb, 0xed, 0x44, 0x41, 0xd6, 0x83, 0xa2, 0xee, 0x08, 0xea, 0xf7, 0x21, - 0xe6, 0xad, 0xfd, 0xeb, 0xcb, 0x0e, 0xb2, 0xc6, 0x4d, 0x7f, 0xeb, 0x43, 0x3d, 0x12, 0x86, 0x1b, - 0xa8, 0x37, 0x1f, 0x32, 0x9e, 0xd0, 0x2d, 0x40, 0xfb, 0x3e, 0x99, 0x77, 0xba, 0xc6, 0xe5, 0x84, - 0x6d, 0xfb, 0xc3, 0xf7, 0xdf, 0x9f, 0xfe, 0x1b, 0xc5, 0x26, 0xe9, 0xb8, 0xec, 0xf8, 0x8b, 0x81, - 0x06, 0x2e, 0xec, 0x11, 0x26, 0x1d, 0x0b, 0xe8, 0x37, 0xce, 0xbc, 0xa5, 0x03, 0x2c, 0xf8, 0x3e, - 0xa4, 0xb1, 0xc8, 0xe4, 0x52, 0xce, 
0x29, 0xb7, 0x1f, 0x4a, 0x3a, 0xf7, 0xb1, 0xa3, 0xa3, 0x03, - 0x89, 0x70, 0x21, 0x15, 0xdc, 0x15, 0xe0, 0x6e, 0xca, 0xe4, 0xe4, 0x50, 0x36, 0xb8, 0x81, 0x7f, - 0x19, 0x68, 0xb8, 0xe3, 0x82, 0xe1, 0xb9, 0x12, 0xb2, 0xe5, 0x9b, 0x6e, 0x3e, 0xfe, 0x17, 0xa8, - 0xea, 0xed, 0xaa, 0x14, 0xf3, 0x1c, 0x2f, 0xe9, 0xc5, 0x28, 0xff, 0x28, 0x55, 0x4a, 0x91, 0x2b, - 0x05, 0x91, 0xc3, 0x36, 0x7b, 0x35, 0xf0, 0x57, 0x03, 0x0d, 0x69, 0x4d, 0x80, 0x67, 0x3b, 0x92, - 0x2c, 0x33, 0x8d, 0x39, 0xa5, 0x83, 0xb5, 0x22, 0x6a, 0x85, 0x03, 0xb8, 0xfd, 0x44, 0x6a, 0x99, - 0xc5, 0x33, 0x3a, 0x2d, 0x69, 0x6b, 0x95, 0x6c, 0x3a, 0xbb, 0xb2, 0x4e, 0x73, 0x3a, 0x27, 0x06, - 0x1a, 0xee, 0xe8, 0x9d, 0x92, 0xe9, 0x74, 0xf3, 0x6d, 0xc9, 0x74, 0xba, 0x5a, 0xd5, 0x7e, 0x26, - 0x15, 0xcd, 0xe3, 0xa7, 0xdd, 0x14, 0xe5, 0x6a, 0x22, 0x51, 0x2f, 0x66, 0xf3, 0xf7, 0xa9, 0x68, - 0xe0, 0xcf, 0x06, 0xba, 0x72, 0xde, 0xdb, 0x78, 0xb2, 0x23, 0xa9, 0x8b, 0xfe, 0xd7, 0x9b, 0x62, - 0x29, 0x3f, 0x6a, 0x06, 0xdb, 0x55, 0xc9, 0xf4, 0x1e, 0x9e, 0xd4, 0x31, 0xdd, 0x2f, 0xc2, 0xc8, - 0x61, 0xf3, 0x29, 0x69, 0x2c, 0xae, 0x1c, 0x9d, 0x5a, 0xc6, 0xf1, 0xa9, 0x65, 0xfc, 0x3c, 0xb5, - 0x8c, 0x8f, 0x67, 0x56, 0xe5, 0xf8, 0xcc, 0xaa, 0x9c, 0x9c, 0x59, 0x95, 0x77, 0xd5, 0x30, 0x12, - 0x5b, 0xe9, 0x86, 0xe3, 0xc3, 0x2e, 0x51, 0x95, 0x5e, 0x50, 0xf1, 0x1e, 0xd8, 0x76, 0x33, 0xfb, - 0x41, 0x33, 0xbf, 0xa8, 0x27, 0x94, 0x6f, 0xf4, 0xca, 0x7f, 0xae, 0x99, 0x3f, 0x01, 0x00, 0x00, - 0xff, 0xff, 0xbd, 0xc8, 0xe6, 0x4d, 0x66, 0x07, 0x00, 0x00, + // 859 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x4f, 0x4f, 0xdc, 0x46, + 0x1c, 0x5d, 0x43, 0x41, 0xdd, 0xe9, 0x01, 0x75, 0x0a, 0x55, 0x31, 0xd4, 0x50, 0xab, 0x85, 0xb2, + 0xa8, 0x76, 0x17, 0x4a, 0x55, 0xda, 0x0a, 0x75, 0x91, 0x00, 0xb5, 0xa8, 0x85, 0x6e, 0x69, 0x0f, + 0x55, 0xa5, 0x95, 0xd7, 0x1e, 0x8c, 0xc5, 0xae, 0xc7, 0x78, 0xc6, 0x5b, 0x56, 0x68, 0x2f, 0x9c, + 0x72, 0x8c, 0x94, 0x4b, 0x94, 0x7b, 0xee, 0xc9, 0x3d, 0x1f, 0x80, 0x23, 0x52, 0x2e, 0xb9, 0x24, + 0x8a, 0x20, 0x4a, 0xbe, 0x40, 0x3e, 0x40, 0xe4, 0x99, 0xf1, 0x1f, 0x60, 0xd6, 0x8b, 0x72, 0xf3, + 0xce, 0xfc, 0xde, 0xef, 0xbd, 0x37, 0x33, 0xbf, 0xa7, 0x05, 0x1a, 0x3a, 0xc6, 0x36, 0x0e, 0x91, + 0xe9, 0x60, 0x77, 0x1f, 0x63, 0xc7, 0xec, 0x54, 0xcd, 0xa3, 0x08, 0x85, 0x5d, 0x23, 0x08, 0x31, + 0xc5, 0x10, 0x8a, 0x7d, 0x43, 0xec, 0x1b, 0x9d, 0xaa, 0x5a, 0xb1, 0x31, 0x69, 0x63, 0x62, 0x36, + 0x2d, 0x82, 0x78, 0xb1, 0xd9, 0xa9, 0x36, 0x11, 0xb5, 0xaa, 0x66, 0x60, 0xb9, 0x9e, 0x6f, 0x51, + 0x0f, 0xfb, 0x1c, 0xaf, 0x4e, 0x89, 0xda, 0xa4, 0x2c, 0xdf, 0x5c, 0x9d, 0x95, 0x90, 0x27, 0x3c, + 0xbc, 0x62, 0x46, 0x52, 0x11, 0x58, 0xa1, 0xd5, 0x26, 0xa2, 0x60, 0xdc, 0xc5, 0x2e, 0x66, 0x9f, + 0x66, 0xfc, 0x25, 0x56, 0xa7, 0x5d, 0x8c, 0xdd, 0x16, 0x32, 0xad, 0xc0, 0x33, 0x2d, 0xdf, 0xc7, + 0x94, 0x49, 0x12, 0x18, 0x7d, 0x1c, 0xc0, 0x3f, 0x63, 0x15, 0xbb, 0xac, 0x51, 0x1d, 0x1d, 0x45, + 0x88, 0x50, 0x7d, 0x07, 0x7c, 0x72, 0x65, 0x95, 0x04, 0xd8, 0x27, 0x08, 0xfe, 0x00, 0x46, 0x39, + 0xe1, 0x67, 0xca, 0xac, 0xf2, 0xf5, 0x47, 0x4b, 0xaa, 0x71, 0xf3, 0x44, 0x0c, 0x8e, 0x59, 0xff, + 0xe0, 0xec, 0xc5, 0x4c, 0xa9, 0x2e, 0xea, 0xf5, 0x65, 0x30, 0xc5, 0x1a, 0xee, 0x04, 0x74, 0x27, + 0xa2, 0x64, 0x0f, 0x6f, 0x7a, 0xbe, 0x47, 0x0e, 0x04, 0x1f, 0x1c, 0x07, 0x23, 0x28, 0xc0, 0xf6, + 0x01, 0xeb, 0x3b, 0x5c, 0xe7, 0x3f, 0xf4, 0xbf, 0xc0, 0x57, 0x02, 0x84, 0x42, 0x8b, 0xe2, 0x90, + 0x83, 0x39, 0x72, 0x23, 0xae, 0x48, 0xe0, 0x15, 0xf0, 0x31, 0x16, 0x35, 0x0d, 0xcb, 0xb6, 0x1b, + 0x96, 0xe3, 0x84, 0xac, 0x55, 0xb9, 0x3e, 0x96, 
0x6c, 0xd4, 0x6c, 0xbb, 0xe6, 0x38, 0xa1, 0xbe, + 0x06, 0xe6, 0x06, 0x35, 0x15, 0x6e, 0xe5, 0xa2, 0x56, 0xc1, 0x17, 0x0c, 0xff, 0xb7, 0xef, 0xa0, + 0x16, 0x72, 0xf9, 0x61, 0xee, 0xe1, 0xdf, 0x2d, 0x1a, 0x85, 0xa8, 0xd8, 0xcf, 0xa6, 0xf0, 0x93, + 0x87, 0x32, 0x9c, 0x47, 0xbb, 0x57, 0xfc, 0x7c, 0x0e, 0x40, 0x88, 0x6c, 0x1c, 0x3a, 0x8d, 0x43, + 0xd4, 0x15, 0x46, 0xca, 0x7c, 0x65, 0x1b, 0x75, 0x53, 0x0b, 0x05, 0x7d, 0x0a, 0x2d, 0x7c, 0x07, + 0x26, 0x18, 0xfe, 0x1f, 0xab, 0xe5, 0x39, 0xf1, 0x19, 0x24, 0xbc, 0x53, 0xa0, 0x6c, 0x63, 0x9f, + 0xe4, 0xcf, 0xef, 0xc3, 0x78, 0x81, 0x1d, 0xdc, 0x7f, 0xe0, 0xd3, 0xeb, 0x28, 0xc1, 0xb2, 0x0e, + 0xca, 0x9d, 0x64, 0x51, 0xbc, 0x8c, 0x2f, 0x65, 0x2f, 0x63, 0x83, 0x2f, 0x65, 0x0d, 0x32, 0x98, + 0x6e, 0x83, 0x49, 0xd6, 0xbd, 0xd6, 0x6a, 0xa5, 0xfb, 0xc9, 0x73, 0x84, 0x9b, 0x00, 0x64, 0xc3, + 0x24, 0x18, 0xe6, 0x0c, 0x3e, 0x4d, 0x46, 0x3c, 0x79, 0x06, 0x9f, 0x24, 0x31, 0x79, 0xc6, 0xae, + 0xe5, 0x26, 0x57, 0x51, 0xcf, 0x21, 0xf5, 0xc7, 0x0a, 0x50, 0x65, 0x2c, 0xc2, 0xc7, 0x6f, 0x00, + 0xa4, 0x82, 0xe2, 0x27, 0x3e, 0x7c, 0x5b, 0x23, 0xe2, 0xb1, 0xe7, 0xd0, 0x70, 0xeb, 0x8a, 0xe4, + 0x21, 0x26, 0x79, 0x7e, 0xa0, 0x64, 0x2e, 0x24, 0xaf, 0x79, 0xe9, 0x6d, 0x19, 0x8c, 0x30, 0xcd, + 0xf0, 0x54, 0x01, 0xa3, 0x7c, 0xb8, 0xe0, 0x9c, 0x4c, 0xd5, 0xcd, 0x39, 0x56, 0xe7, 0x07, 0xd6, + 0x71, 0x46, 0x7d, 0xfe, 0xce, 0x9b, 0x47, 0x15, 0xe5, 0xf4, 0xe9, 0xab, 0x7b, 0x43, 0xd3, 0x50, + 0x35, 0xfb, 0x26, 0x0d, 0x7c, 0xa8, 0x80, 0xb1, 0x6b, 0x43, 0x0c, 0xcd, 0xbe, 0x2c, 0xf2, 0x71, + 0x57, 0xa5, 0x87, 0x5a, 0xb3, 0x6d, 0x1c, 0xf9, 0x34, 0x7e, 0x6b, 0x88, 0x10, 0x44, 0xf4, 0x9f, + 0x32, 0x4d, 0xdf, 0x42, 0x43, 0xa6, 0x09, 0x07, 0xb4, 0x81, 0x23, 0x4a, 0x1a, 0x14, 0x37, 0xf6, + 0x19, 0x83, 0x79, 0xc2, 0x9e, 0x78, 0x0f, 0xbe, 0x56, 0xc0, 0x64, 0xdf, 0x11, 0x87, 0xab, 0x05, + 0x8a, 0x8b, 0xb3, 0x46, 0xfd, 0xf1, 0x7d, 0xa0, 0xe2, 0x94, 0xeb, 0x99, 0xa3, 0x2d, 0xb8, 0x21, + 0x77, 0x24, 0x62, 0x4c, 0x58, 0x13, 0xb6, 0x1a, 0xcc, 0x95, 0x79, 0x72, 0x23, 0xe5, 0x7a, 0xf0, + 0x89, 0x02, 0x26, 0xa4, 0x59, 0x04, 0x57, 0xfa, 0x2a, 0x2d, 0xca, 0x2e, 0xb5, 0x22, 0x83, 0xe5, + 0x11, 0xf5, 0x24, 0x88, 0x88, 0xfe, 0x4b, 0x66, 0x68, 0x05, 0x2e, 0xcb, 0x0c, 0x45, 0x79, 0xaa, + 0xf8, 0x9e, 0xda, 0x8c, 0x2c, 0xbd, 0xa7, 0xe7, 0x0a, 0x98, 0xec, 0x9b, 0x63, 0x05, 0xf7, 0x34, + 0x28, 0x43, 0x0b, 0xee, 0x69, 0x60, 0x6c, 0xea, 0xbf, 0x66, 0xb6, 0xd6, 0xe0, 0xcf, 0x83, 0x6c, + 0x71, 0x4b, 0x1e, 0xed, 0x26, 0xb7, 0x94, 0x65, 0x77, 0x0f, 0x3e, 0x50, 0x40, 0x39, 0xcd, 0x09, + 0xb8, 0xd0, 0x57, 0xd4, 0xf5, 0x2c, 0x96, 0x5f, 0x83, 0x3c, 0x80, 0xf5, 0xef, 0x33, 0xbd, 0x8b, + 0x70, 0x41, 0xa6, 0x37, 0x4d, 0x26, 0xf3, 0x24, 0x4d, 0xf8, 0x1e, 0xbc, 0xaf, 0x00, 0x90, 0xe5, + 0x20, 0xfc, 0xa6, 0x2f, 0xa5, 0x2c, 0x95, 0x55, 0xe3, 0xb6, 0xe5, 0x42, 0xe5, 0x62, 0xa6, 0x72, + 0x16, 0x6a, 0x85, 0x2a, 0xc9, 0xfa, 0xf6, 0xd9, 0x85, 0xa6, 0x9c, 0x5f, 0x68, 0xca, 0xcb, 0x0b, + 0x4d, 0xb9, 0x7b, 0xa9, 0x95, 0xce, 0x2f, 0xb5, 0xd2, 0xb3, 0x4b, 0xad, 0xf4, 0x6f, 0xd5, 0xf5, + 0xe8, 0x41, 0xd4, 0x34, 0x6c, 0xdc, 0x36, 0x45, 0x10, 0xff, 0x81, 0xe8, 0xff, 0x38, 0x3c, 0x4c, + 0x5b, 0x1e, 0xa7, 0x4d, 0x69, 0x37, 0x40, 0xa4, 0x39, 0xca, 0xfe, 0xeb, 0x2c, 0xbf, 0x0b, 0x00, + 0x00, 0xff, 0xff, 0x0d, 0x54, 0x9a, 0x11, 0xe1, 0x09, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -527,8 +689,12 @@ type QueryClient interface { UndelegationsToMature(ctx context.Context, in *QueryUndelegationsToMatureRequest, opts ...grpc.CallOption) (*UndelegationRecordKeys, error) // UndelegationMaturityEpoch queries the epoch when the undelegation will mature. UndelegationMaturityEpoch(ctx context.Context, in *QueryUndelegationMaturityEpochRequest, opts ...grpc.CallOption) (*QueryUndelegationMaturityEpochResponse, error) - // QueryValidator queries the validator for the given consensus address. - QueryValidator(ctx context.Context, in *QueryValidatorRequest, opts ...grpc.CallOption) (*ExocoreValidator, error) + // Validator queries the validator for the given consensus address. This is the bare-minimum validator: + // public key, consensus address, power. No other params such as commission, jailed, etc. are included. + Validator(ctx context.Context, in *QueryValidatorRequest, opts ...grpc.CallOption) (*QueryValidatorResponse, error) + // Validators queries all validators. This is the bare-minimum validator: public key, consensus address, power. + // No other params such as commission, jailed, etc. are included. + Validators(ctx context.Context, in *QueryAllValidatorsRequest, opts ...grpc.CallOption) (*QueryAllValidatorsResponse, error) } type queryClient struct { @@ -584,9 +750,18 @@ func (c *queryClient) UndelegationMaturityEpoch(ctx context.Context, in *QueryUn return out, nil } -func (c *queryClient) QueryValidator(ctx context.Context, in *QueryValidatorRequest, opts ...grpc.CallOption) (*ExocoreValidator, error) { - out := new(ExocoreValidator) - err := c.cc.Invoke(ctx, "/exocore.dogfood.v1.Query/QueryValidator", in, out, opts...) +func (c *queryClient) Validator(ctx context.Context, in *QueryValidatorRequest, opts ...grpc.CallOption) (*QueryValidatorResponse, error) { + out := new(QueryValidatorResponse) + err := c.cc.Invoke(ctx, "/exocore.dogfood.v1.Query/Validator", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Validators(ctx context.Context, in *QueryAllValidatorsRequest, opts ...grpc.CallOption) (*QueryAllValidatorsResponse, error) { + out := new(QueryAllValidatorsResponse) + err := c.cc.Invoke(ctx, "/exocore.dogfood.v1.Query/Validators", in, out, opts...) if err != nil { return nil, err } @@ -605,8 +780,12 @@ type QueryServer interface { UndelegationsToMature(context.Context, *QueryUndelegationsToMatureRequest) (*UndelegationRecordKeys, error) // UndelegationMaturityEpoch queries the epoch when the undelegation will mature. UndelegationMaturityEpoch(context.Context, *QueryUndelegationMaturityEpochRequest) (*QueryUndelegationMaturityEpochResponse, error) - // QueryValidator queries the validator for the given consensus address. - QueryValidator(context.Context, *QueryValidatorRequest) (*ExocoreValidator, error) + // Validator queries the validator for the given consensus address. This is the bare-minimum validator: + // public key, consensus address, power. No other params such as commission, jailed, etc. are included. + Validator(context.Context, *QueryValidatorRequest) (*QueryValidatorResponse, error) + // Validators queries all validators. This is the bare-minimum validator: public key, consensus address, power. + // No other params such as commission, jailed, etc. are included. + Validators(context.Context, *QueryAllValidatorsRequest) (*QueryAllValidatorsResponse, error) } // UnimplementedQueryServer can be embedded to have forward compatible implementations. 
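For orientation on the client surface changing here, a minimal sketch of calling the renamed Validator RPC and the new paginated Validators RPC through the generated QueryClient. The x/dogfood types import path, the localhost:9090 gRPC endpoint, and the ConsAddr field name (inferred from the cons_addr path parameter used by the REST route further down) are assumptions rather than part of this patch.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/cosmos/cosmos-sdk/types/query"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	dogfoodtypes "github.com/ExocoreNetwork/exocore/x/dogfood/types"
)

func main() {
	// assumed default gRPC endpoint of a locally running node
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	qc := dogfoodtypes.NewQueryClient(conn)

	// look up a single validator by consensus address (placeholder value; the
	// expected encoding follows the cons_addr parameter of the REST route)
	one, err := qc.Validator(context.Background(), &dogfoodtypes.QueryValidatorRequest{
		ConsAddr: "0123456789abcdef0123456789abcdef01234567",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("validator:", one.Validator)

	// page through all validators
	all, err := qc.Validators(context.Background(), &dogfoodtypes.QueryAllValidatorsRequest{
		Pagination: &query.PageRequest{Limit: 50},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("validators on this page:", len(all.Validators))
}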
@@ -628,8 +807,11 @@ func (*UnimplementedQueryServer) UndelegationsToMature(ctx context.Context, req func (*UnimplementedQueryServer) UndelegationMaturityEpoch(ctx context.Context, req *QueryUndelegationMaturityEpochRequest) (*QueryUndelegationMaturityEpochResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UndelegationMaturityEpoch not implemented") } -func (*UnimplementedQueryServer) QueryValidator(ctx context.Context, req *QueryValidatorRequest) (*ExocoreValidator, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryValidator not implemented") +func (*UnimplementedQueryServer) Validator(ctx context.Context, req *QueryValidatorRequest) (*QueryValidatorResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Validator not implemented") +} +func (*UnimplementedQueryServer) Validators(ctx context.Context, req *QueryAllValidatorsRequest) (*QueryAllValidatorsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Validators not implemented") } func RegisterQueryServer(s grpc1.Server, srv QueryServer) { @@ -726,20 +908,38 @@ func _Query_UndelegationMaturityEpoch_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } -func _Query_QueryValidator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Query_Validator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryValidatorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).QueryValidator(ctx, in) + return srv.(QueryServer).Validator(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/exocore.dogfood.v1.Query/QueryValidator", + FullMethod: "/exocore.dogfood.v1.Query/Validator", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).QueryValidator(ctx, req.(*QueryValidatorRequest)) + return srv.(QueryServer).Validator(ctx, req.(*QueryValidatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Validators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllValidatorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Validators(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/exocore.dogfood.v1.Query/Validators", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Validators(ctx, req.(*QueryAllValidatorsRequest)) } return interceptor(ctx, in, info, handler) } @@ -769,8 +969,12 @@ var _Query_serviceDesc = grpc.ServiceDesc{ Handler: _Query_UndelegationMaturityEpoch_Handler, }, { - MethodName: "QueryValidator", - Handler: _Query_QueryValidator_Handler, + MethodName: "Validator", + Handler: _Query_Validator_Handler, + }, + { + MethodName: "Validators", + Handler: _Query_Validators_Handler, }, }, Streams: []grpc.StreamDesc{}, @@ -1035,6 +1239,125 @@ func (m *QueryValidatorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *QueryValidatorResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *QueryValidatorResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryValidatorResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Validator != nil { + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllValidatorsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllValidatorsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllValidatorsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllValidatorsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllValidatorsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllValidatorsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Validators) > 0 { + for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { offset -= sovQuery(v) base := offset @@ -1153,6 +1476,51 @@ func (m *QueryValidatorRequest) Size() (n int) { return n } +func (m *QueryValidatorResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Validator != nil { + l = m.Validator.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllValidatorsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllValidatorsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + func sovQuery(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1814,6 +2182,298 @@ func (m *QueryValidatorRequest) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryValidatorResponse) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryValidatorResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryValidatorResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validator == nil { + m.Validator = &ExocoreValidator{} + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllValidatorsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllValidatorsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllValidatorsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllValidatorsResponse) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllValidatorsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllValidatorsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, ExocoreValidator{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipQuery(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/dogfood/types/query.pb.gw.go b/x/dogfood/types/query.pb.gw.go index 0cd87f36c..655c92dda 100644 --- a/x/dogfood/types/query.pb.gw.go +++ b/x/dogfood/types/query.pb.gw.go @@ -267,7 +267,7 @@ func local_request_Query_UndelegationMaturityEpoch_0(ctx context.Context, marsha } -func request_Query_QueryValidator_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { +func request_Query_Validator_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryValidatorRequest var metadata runtime.ServerMetadata @@ -289,12 +289,12 @@ func request_Query_QueryValidator_0(ctx context.Context, marshaler runtime.Marsh return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", 
"cons_addr", err) } - msg, err := client.QueryValidator(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.Validator(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -func local_request_Query_QueryValidator_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { +func local_request_Query_Validator_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryValidatorRequest var metadata runtime.ServerMetadata @@ -316,7 +316,43 @@ func local_request_Query_QueryValidator_0(ctx context.Context, marshaler runtime return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "cons_addr", err) } - msg, err := server.QueryValidator(ctx, &protoReq) + msg, err := server.Validator(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Validators_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Validators_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllValidatorsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Validators_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Validators(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Validators_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllValidatorsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Validators_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Validators(ctx, &protoReq) return msg, metadata, err } @@ -442,7 +478,30 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) - mux.Handle("GET", pattern_Query_QueryValidator_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_Validator_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Validator_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, 
md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Validator_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Validators_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -453,7 +512,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_QueryValidator_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_Validators_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -461,7 +520,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_QueryValidator_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_Validators_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -606,7 +665,27 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) - mux.Handle("GET", pattern_Query_QueryValidator_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_Validator_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Validator_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Validator_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Validators_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -615,14 +694,14 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_QueryValidator_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_Validators_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_QueryValidator_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_Validators_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -640,7 +719,9 @@ var ( pattern_Query_UndelegationMaturityEpoch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"exocore", "dogfood", "v1", "undelegation_maturity_epoch", "record_key"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_QueryValidator_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"exocore", "dogfood", "v1", "validator", "cons_addr"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_Validator_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"exocore", "dogfood", "v1", "validator", "cons_addr"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Validators_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"exocore", "dogfood", "v1", "validators"}, "", runtime.AssumeColonVerbOpt(false))) ) var ( @@ -654,5 +735,7 @@ var ( forward_Query_UndelegationMaturityEpoch_0 = runtime.ForwardResponseMessage - forward_Query_QueryValidator_0 = runtime.ForwardResponseMessage + forward_Query_Validator_0 = runtime.ForwardResponseMessage + + forward_Query_Validators_0 = runtime.ForwardResponseMessage ) diff --git a/x/epochs/client/cli/query.go b/x/epochs/client/cli/query.go index 89ecc1d18..6f602a4aa 100644 --- a/x/epochs/client/cli/query.go +++ b/x/epochs/client/cli/query.go @@ -28,6 +28,7 @@ func GetQueryCmd(string) *cobra.Command { cmd.AddCommand(GetCmdEpochsInfos()) cmd.AddCommand(GetCmdCurrentEpoch()) + cmd.AddCommand(GetCmdEpochInfo()) return cmd } @@ -107,3 +108,38 @@ func GetCmdCurrentEpoch() *cobra.Command { return cmd } + +// GetCmdEpochInfo provides epoch info for the given identifier. +func GetCmdEpochInfo() *cobra.Command { + cmd := &cobra.Command{ + Use: "epoch-info ", + Short: "Query epoch info for the given identifier", + Example: strings.TrimSpace( + fmt.Sprintf(`$ %s query %s epoch-info week`, + version.AppName, types.ModuleName, + ), + ), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryClient(clientCtx) + + res, err := queryClient.EpochInfo(cmd.Context(), &types.QueryEpochInfoRequest{ + Identifier: args[0], + }) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/epochs/keeper/grpc_query.go b/x/epochs/keeper/grpc_query.go index 39879d17a..f9d7f0bff 100644 --- a/x/epochs/keeper/grpc_query.go +++ b/x/epochs/keeper/grpc_query.go @@ -71,3 +71,25 @@ func (k Keeper) CurrentEpoch( CurrentEpoch: info.CurrentEpoch, }, nil } + +// GetEpochInfo provides epoch info of specified identifier +func (k Keeper) EpochInfo( + c context.Context, + req *types.QueryEpochInfoRequest, +) (*types.QueryEpochInfoResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + + info, found := k.GetEpochInfo(ctx, req.Identifier) + if !found { + return nil, status.Errorf(codes.NotFound, "epoch info not found: %s", req.Identifier) + } + + return &types.QueryEpochInfoResponse{ + Epoch: info, + BlockTime: ctx.BlockTime(), + }, nil +} diff --git a/x/epochs/types/query.pb.go b/x/epochs/types/query.pb.go index 8a902d5cb..d6235beac 100644 --- a/x/epochs/types/query.pb.go +++ b/x/epochs/types/query.pb.go @@ -146,6 +146,107 @@ func (m *QueryEpochsInfoResponse) 
GetPagination() *query.PageResponse { return nil } +// QueryEpochInfoRequest is the request type for the Query/EpochInfo RPC method. +type QueryEpochInfoRequest struct { + // identifier of the epoch for which the information is requested. + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` +} + +func (m *QueryEpochInfoRequest) Reset() { *m = QueryEpochInfoRequest{} } +func (m *QueryEpochInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryEpochInfoRequest) ProtoMessage() {} +func (*QueryEpochInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9e74fdf7cde95576, []int{2} +} +func (m *QueryEpochInfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochInfoRequest.Merge(m, src) +} +func (m *QueryEpochInfoRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochInfoRequest proto.InternalMessageInfo + +func (m *QueryEpochInfoRequest) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +// QueryEpochInfoResponse is the response type for the Query/EpochInfo RPC method. +type QueryEpochInfoResponse struct { + // epoch is the EpochInfo for the requested epoch. + Epoch EpochInfo `protobuf:"bytes,1,opt,name=epoch,proto3" json:"epoch"` + // block_time is the block time of the query block ctx. + BlockTime time.Time `protobuf:"bytes,2,opt,name=block_time,json=blockTime,proto3,stdtime" json:"block_time"` +} + +func (m *QueryEpochInfoResponse) Reset() { *m = QueryEpochInfoResponse{} } +func (m *QueryEpochInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryEpochInfoResponse) ProtoMessage() {} +func (*QueryEpochInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9e74fdf7cde95576, []int{3} +} +func (m *QueryEpochInfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochInfoResponse.Merge(m, src) +} +func (m *QueryEpochInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochInfoResponse proto.InternalMessageInfo + +func (m *QueryEpochInfoResponse) GetEpoch() EpochInfo { + if m != nil { + return m.Epoch + } + return EpochInfo{} +} + +func (m *QueryEpochInfoResponse) GetBlockTime() time.Time { + if m != nil { + return m.BlockTime + } + return time.Time{} +} + // QueryCurrentEpochRequest is the request type for the Query/EpochInfos RPC // method. 
type QueryCurrentEpochRequest struct { @@ -157,7 +258,7 @@ func (m *QueryCurrentEpochRequest) Reset() { *m = QueryCurrentEpochReque func (m *QueryCurrentEpochRequest) String() string { return proto.CompactTextString(m) } func (*QueryCurrentEpochRequest) ProtoMessage() {} func (*QueryCurrentEpochRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9e74fdf7cde95576, []int{2} + return fileDescriptor_9e74fdf7cde95576, []int{4} } func (m *QueryCurrentEpochRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -204,7 +305,7 @@ func (m *QueryCurrentEpochResponse) Reset() { *m = QueryCurrentEpochResp func (m *QueryCurrentEpochResponse) String() string { return proto.CompactTextString(m) } func (*QueryCurrentEpochResponse) ProtoMessage() {} func (*QueryCurrentEpochResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9e74fdf7cde95576, []int{3} + return fileDescriptor_9e74fdf7cde95576, []int{5} } func (m *QueryCurrentEpochResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,6 +344,8 @@ func (m *QueryCurrentEpochResponse) GetCurrentEpoch() int64 { func init() { proto.RegisterType((*QueryEpochsInfoRequest)(nil), "exocore.epochs.v1.QueryEpochsInfoRequest") proto.RegisterType((*QueryEpochsInfoResponse)(nil), "exocore.epochs.v1.QueryEpochsInfoResponse") + proto.RegisterType((*QueryEpochInfoRequest)(nil), "exocore.epochs.v1.QueryEpochInfoRequest") + proto.RegisterType((*QueryEpochInfoResponse)(nil), "exocore.epochs.v1.QueryEpochInfoResponse") proto.RegisterType((*QueryCurrentEpochRequest)(nil), "exocore.epochs.v1.QueryCurrentEpochRequest") proto.RegisterType((*QueryCurrentEpochResponse)(nil), "exocore.epochs.v1.QueryCurrentEpochResponse") } @@ -250,40 +353,43 @@ func init() { func init() { proto.RegisterFile("exocore/epochs/v1/query.proto", fileDescriptor_9e74fdf7cde95576) } var fileDescriptor_9e74fdf7cde95576 = []byte{ - // 518 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xb3, 0x09, 0x54, 0x74, 0x5b, 0x0e, 0xac, 0x10, 0xa4, 0xa6, 0x38, 0x25, 0x48, 0x50, - 0x0a, 0xec, 0x92, 0x20, 0x2e, 0x3d, 0xa1, 0x54, 0x05, 0xc1, 0x01, 0x81, 0xc5, 0x89, 0x4b, 0xb1, - 0xcd, 0xc4, 0xb5, 0xda, 0x78, 0x5c, 0xef, 0x3a, 0xb4, 0x42, 0x5c, 0x38, 0x70, 0xae, 0xc4, 0x0b, - 0xf0, 0x0e, 0xbc, 0x44, 0x8f, 0x95, 0xb8, 0x70, 0x02, 0x94, 0xf0, 0x20, 0xc8, 0xbb, 0xeb, 0x36, - 0x51, 0x8c, 0xe8, 0xcd, 0xde, 0x99, 0x7f, 0xe6, 0xff, 0xfe, 0x5d, 0x7a, 0x1d, 0xf6, 0x31, 0xc4, - 0x0c, 0x04, 0xa4, 0x18, 0x6e, 0x4b, 0x31, 0xec, 0x88, 0xbd, 0x1c, 0xb2, 0x03, 0x9e, 0x66, 0xa8, - 0x90, 0x5d, 0xb2, 0x65, 0x6e, 0xca, 0x7c, 0xd8, 0x71, 0xd6, 0x42, 0x94, 0x03, 0x94, 0x22, 0xf0, - 0x25, 0x98, 0x5e, 0x31, 0xec, 0x04, 0xa0, 0xfc, 0x8e, 0x48, 0xfd, 0x28, 0x4e, 0x7c, 0x15, 0x63, - 0x62, 0xe4, 0x4e, 0x6b, 0x76, 0x7a, 0x04, 0x09, 0xc8, 0x58, 0xda, 0x86, 0xcb, 0x11, 0x46, 0xa8, - 0x3f, 0x45, 0xf1, 0x65, 0x4f, 0x97, 0x23, 0xc4, 0x68, 0x17, 0x84, 0x9f, 0xc6, 0xc2, 0x4f, 0x12, - 0x54, 0x7a, 0x66, 0xa9, 0x69, 0xd9, 0xaa, 0xfe, 0x0b, 0xf2, 0xbe, 0x50, 0xf1, 0x00, 0xa4, 0xf2, - 0x07, 0xa9, 0x69, 0x68, 0xbf, 0xa5, 0x57, 0x5e, 0x15, 0xbe, 0x36, 0xf5, 0xd2, 0x67, 0x49, 0x1f, - 0x3d, 0xd8, 0xcb, 0x41, 0x2a, 0xf6, 0x84, 0xd2, 0x53, 0x8f, 0x4d, 0xb2, 0x42, 0x56, 0x17, 0xba, - 0xb7, 0xb8, 0x01, 0xe2, 0x05, 0x10, 0x37, 0xf0, 0x16, 0x88, 0xbf, 0xf4, 0x23, 0xb0, 0x5a, 0x6f, - 0x42, 0xd9, 0x1e, 0x13, 0x7a, 0x75, 0x66, 0x85, 0x4c, 0x31, 0x91, 0xc0, 0xd6, 0xe9, 0x9c, 0xa1, - 0x6d, 0x92, 0x95, 0xc6, 0xea, 0x42, 0x77, 0x99, 
0xcf, 0x64, 0xc8, 0xb5, 0xac, 0x50, 0xf5, 0xce, - 0x1d, 0xfd, 0x6c, 0xd5, 0x3c, 0xab, 0x60, 0x1b, 0x94, 0x06, 0xbb, 0x18, 0xee, 0x6c, 0x15, 0x48, - 0xcd, 0xba, 0xf6, 0xe7, 0x70, 0xc3, 0xcb, 0x4b, 0x5e, 0xfe, 0xba, 0xe4, 0xed, 0x5d, 0x28, 0xd4, - 0x87, 0xbf, 0x5a, 0xc4, 0x9b, 0xd7, 0xba, 0xa2, 0xc2, 0x9e, 0x4e, 0x41, 0x36, 0xf4, 0x90, 0xdb, - 0xff, 0x85, 0x34, 0xee, 0xa7, 0x28, 0xd7, 0x69, 0x53, 0x43, 0x6e, 0xe4, 0x59, 0x06, 0x89, 0xd2, - 0xa6, 0xcb, 0x24, 0x5d, 0x4a, 0xe3, 0x77, 0x90, 0xa8, 0xb8, 0x1f, 0x43, 0xa6, 0x93, 0x9c, 0xf7, - 0x26, 0x4e, 0xda, 0x8f, 0xe9, 0x52, 0x85, 0xd6, 0x46, 0x74, 0x93, 0x5e, 0x0c, 0xcd, 0xf9, 0x96, - 0x06, 0xd7, 0xfa, 0x86, 0xb7, 0x18, 0x4e, 0x34, 0x77, 0xbf, 0xd5, 0xe9, 0x79, 0x3d, 0x82, 0x7d, - 0x26, 0x94, 0x9e, 0x24, 0x26, 0xd9, 0x9d, 0x8a, 0x40, 0xab, 0xef, 0xdb, 0x59, 0x3b, 0x4b, 0xab, - 0x31, 0xd5, 0xbe, 0xf1, 0xe9, 0xfb, 0x9f, 0x2f, 0xf5, 0x6b, 0x6c, 0x49, 0xcc, 0x3e, 0x5a, 0x7b, - 0x3d, 0x5f, 0x09, 0x5d, 0x9c, 0x04, 0x62, 0x77, 0xff, 0x35, 0xbf, 0x22, 0x32, 0xe7, 0xde, 0xd9, - 0x9a, 0xad, 0x9d, 0x47, 0xda, 0x8e, 0x60, 0xf7, 0x2b, 0xec, 0x4c, 0x85, 0x27, 0x3e, 0x9c, 0xc6, - 0xfe, 0xb1, 0xf7, 0xfc, 0x68, 0xe4, 0x92, 0xe3, 0x91, 0x4b, 0x7e, 0x8f, 0x5c, 0x72, 0x38, 0x76, - 0x6b, 0xc7, 0x63, 0xb7, 0xf6, 0x63, 0xec, 0xd6, 0xde, 0x3c, 0x88, 0x62, 0xb5, 0x9d, 0x07, 0x3c, - 0xc4, 0x81, 0xd8, 0x34, 0x23, 0x5f, 0x80, 0x7a, 0x8f, 0xd9, 0xce, 0xc9, 0x86, 0xfd, 0x72, 0x87, - 0x3a, 0x48, 0x41, 0x06, 0x73, 0xfa, 0xc5, 0x3d, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xd4, 0xc9, - 0x47, 0xbe, 0x24, 0x04, 0x00, 0x00, + // 576 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0x3f, 0x6f, 0x13, 0x3f, + 0x1c, 0xc6, 0xe3, 0xe6, 0xd7, 0xea, 0x97, 0x6f, 0xcb, 0x80, 0xc5, 0x9f, 0x34, 0x94, 0x4b, 0x09, + 0x82, 0xa6, 0x85, 0xda, 0x24, 0x08, 0x81, 0x3a, 0xa1, 0x54, 0x05, 0xc1, 0x80, 0xe0, 0xc4, 0xc4, + 0x52, 0xee, 0x0e, 0xe7, 0x7a, 0x6a, 0x73, 0xbe, 0x9e, 0x9d, 0xd0, 0x0a, 0xb1, 0x30, 0xb0, 0x52, + 0x89, 0x81, 0x95, 0x37, 0x83, 0xd4, 0xb1, 0x12, 0x0b, 0x13, 0xa0, 0x84, 0x17, 0x82, 0xce, 0x76, + 0x92, 0x8b, 0x72, 0x85, 0x1b, 0xd8, 0x1c, 0xfb, 0xfb, 0x3c, 0x7e, 0x3e, 0x4f, 0x7c, 0x70, 0x99, + 0x1d, 0x70, 0x8f, 0xc7, 0x8c, 0xb2, 0x88, 0x7b, 0x3b, 0x82, 0xf6, 0x1a, 0x74, 0xbf, 0xcb, 0xe2, + 0x43, 0x12, 0xc5, 0x5c, 0x72, 0x7c, 0xd6, 0x1c, 0x13, 0x7d, 0x4c, 0x7a, 0x8d, 0xca, 0x9a, 0xc7, + 0x45, 0x87, 0x0b, 0xea, 0x3a, 0x82, 0xe9, 0x59, 0xda, 0x6b, 0xb8, 0x4c, 0x3a, 0x0d, 0x1a, 0x39, + 0x7e, 0x10, 0x3a, 0x32, 0xe0, 0xa1, 0x96, 0x57, 0xaa, 0xd3, 0xee, 0x3e, 0x0b, 0x99, 0x08, 0x84, + 0x19, 0x38, 0xe7, 0x73, 0x9f, 0xab, 0x25, 0x4d, 0x56, 0x66, 0x77, 0xc9, 0xe7, 0xdc, 0xdf, 0x63, + 0xd4, 0x89, 0x02, 0xea, 0x84, 0x21, 0x97, 0xca, 0x73, 0xa8, 0xa9, 0x9a, 0x53, 0xf5, 0xcb, 0xed, + 0xb6, 0xa9, 0x0c, 0x3a, 0x4c, 0x48, 0xa7, 0x13, 0xe9, 0x81, 0xda, 0x4b, 0xb8, 0xf0, 0x2c, 0xc9, + 0xb5, 0xa5, 0x2e, 0x7d, 0x14, 0xb6, 0xb9, 0xcd, 0xf6, 0xbb, 0x4c, 0x48, 0xfc, 0x00, 0x60, 0x9c, + 0xb1, 0x8c, 0x96, 0x51, 0x7d, 0xbe, 0x79, 0x9d, 0x68, 0x20, 0x92, 0x00, 0x11, 0x0d, 0x6f, 0x80, + 0xc8, 0x53, 0xc7, 0x67, 0x46, 0x6b, 0xa7, 0x94, 0xb5, 0x01, 0x82, 0x8b, 0x53, 0x57, 0x88, 0x88, + 0x87, 0x82, 0xe1, 0x0d, 0x98, 0xd3, 0xb4, 0x65, 0xb4, 0x5c, 0xac, 0xcf, 0x37, 0x97, 0xc8, 0x54, + 0x87, 0x44, 0xc9, 0x12, 0x55, 0xeb, 0xbf, 0xe3, 0xef, 0xd5, 0x82, 0x6d, 0x14, 0x78, 0x13, 0xc0, + 0xdd, 0xe3, 0xde, 0xee, 0x76, 0x82, 0x54, 0x9e, 0x51, 0xf9, 0x2a, 0x44, 0xf3, 0x92, 0x21, 0x2f, + 0x79, 0x3e, 0xe4, 0x6d, 0xfd, 0x9f, 0xa8, 0x8f, 0x7e, 0x54, 0x91, 0x5d, 0x52, 0xba, 
0xe4, 0x04, + 0x3f, 0x9c, 0x80, 0x2c, 0x2a, 0x93, 0x95, 0xbf, 0x42, 0xea, 0xf4, 0x13, 0x94, 0x77, 0xe1, 0xfc, + 0x18, 0x32, 0x5d, 0xa3, 0x05, 0x10, 0xbc, 0x62, 0xa1, 0x0c, 0xda, 0x01, 0x8b, 0x55, 0x8d, 0x25, + 0x3b, 0xb5, 0x53, 0xfb, 0x84, 0xd2, 0xff, 0xc0, 0x44, 0x3b, 0xf7, 0x60, 0x56, 0xb1, 0x9a, 0xf2, + 0xf3, 0x94, 0xa3, 0x05, 0xff, 0xa4, 0x9b, 0xda, 0x06, 0x94, 0x55, 0xb0, 0xcd, 0x6e, 0x1c, 0xb3, + 0x50, 0xaa, 0xab, 0xf2, 0x52, 0xdd, 0x87, 0xc5, 0x0c, 0xad, 0xe1, 0xba, 0x0a, 0x67, 0x3c, 0xbd, + 0xbf, 0x3d, 0xe6, 0x2b, 0xda, 0x0b, 0x5e, 0x6a, 0xb8, 0xf9, 0xa5, 0x08, 0xb3, 0xca, 0x02, 0xbf, + 0x47, 0x00, 0x23, 0x4e, 0x81, 0x57, 0x33, 0x6a, 0xc8, 0x7e, 0xc2, 0x95, 0xb5, 0x3c, 0xa3, 0x3a, + 0x54, 0xed, 0xca, 0xbb, 0xaf, 0xbf, 0x3e, 0xce, 0x5c, 0xc2, 0x8b, 0x74, 0xfa, 0x3b, 0x34, 0x2f, + 0xee, 0x33, 0x82, 0x85, 0x34, 0x10, 0xbe, 0x71, 0x9a, 0x7f, 0x46, 0x65, 0x95, 0x9b, 0xf9, 0x86, + 0x4d, 0x9c, 0x3b, 0x2a, 0x0e, 0xc5, 0xeb, 0x19, 0x71, 0x26, 0xca, 0xa3, 0x6f, 0xc6, 0xb5, 0xbf, + 0xc5, 0x1f, 0x10, 0x94, 0x46, 0x5d, 0xe1, 0xfa, 0x1f, 0xf9, 0xd3, 0x4d, 0xad, 0xe6, 0x98, 0x34, + 0xc9, 0xd6, 0x55, 0xb2, 0x15, 0x7c, 0xed, 0xb4, 0xa2, 0x26, 0x12, 0xb5, 0x1e, 0x1f, 0xf7, 0x2d, + 0x74, 0xd2, 0xb7, 0xd0, 0xcf, 0xbe, 0x85, 0x8e, 0x06, 0x56, 0xe1, 0x64, 0x60, 0x15, 0xbe, 0x0d, + 0xac, 0xc2, 0x8b, 0x5b, 0x7e, 0x20, 0x77, 0xba, 0x2e, 0xf1, 0x78, 0x87, 0x6e, 0x69, 0xab, 0x27, + 0x4c, 0xbe, 0xe6, 0xf1, 0xee, 0xc8, 0xf9, 0x60, 0xe8, 0x2d, 0x0f, 0x23, 0x26, 0xdc, 0x39, 0xf5, + 0x74, 0x6f, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x85, 0x7b, 0xdb, 0x89, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -302,6 +408,8 @@ type QueryClient interface { EpochInfos(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error) // CurrentEpoch provide the current epoch for the specified identifier. CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error) + // EpochInfo provides the epoch information for the specified identifier. + EpochInfo(ctx context.Context, in *QueryEpochInfoRequest, opts ...grpc.CallOption) (*QueryEpochInfoResponse, error) } type queryClient struct { @@ -330,12 +438,23 @@ func (c *queryClient) CurrentEpoch(ctx context.Context, in *QueryCurrentEpochReq return out, nil } +func (c *queryClient) EpochInfo(ctx context.Context, in *QueryEpochInfoRequest, opts ...grpc.CallOption) (*QueryEpochInfoResponse, error) { + out := new(QueryEpochInfoResponse) + err := c.cc.Invoke(ctx, "/exocore.epochs.v1.Query/EpochInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // QueryServer is the server API for Query service. type QueryServer interface { // EpochInfos provide a list of currently running epochs. EpochInfos(context.Context, *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) // CurrentEpoch provide the current epoch for the specified identifier. CurrentEpoch(context.Context, *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) + // EpochInfo provides the epoch information for the specified identifier. + EpochInfo(context.Context, *QueryEpochInfoRequest) (*QueryEpochInfoResponse, error) } // UnimplementedQueryServer can be embedded to have forward compatible implementations. 
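The EpochInfo RPC added to this service is also wired through the gRPC-gateway further down as GET /exocore/epochs/v1/epoch/{identifier}. A minimal sketch of exercising that route against a running node; the 1317 API port is the usual Cosmos SDK default and is an assumption, as is a configured "week" identifier (matching the example used by the new epoch-info CLI subcommand).

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// the route registered for pattern_Query_EpochInfo_0: /exocore/epochs/v1/epoch/{identifier}
	resp, err := http.Get("http://localhost:1317/exocore/epochs/v1/epoch/week")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// the JSON body carries the EpochInfo under "epoch" and the query block's time under "block_time"
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}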
@@ -348,6 +467,9 @@ func (*UnimplementedQueryServer) EpochInfos(ctx context.Context, req *QueryEpoch func (*UnimplementedQueryServer) CurrentEpoch(ctx context.Context, req *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CurrentEpoch not implemented") } +func (*UnimplementedQueryServer) EpochInfo(ctx context.Context, req *QueryEpochInfoRequest) (*QueryEpochInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EpochInfo not implemented") +} func RegisterQueryServer(s grpc1.Server, srv QueryServer) { s.RegisterService(&_Query_serviceDesc, srv) @@ -389,6 +511,24 @@ func _Query_CurrentEpoch_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _Query_EpochInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryEpochInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).EpochInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/exocore.epochs.v1.Query/EpochInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).EpochInfo(ctx, req.(*QueryEpochInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "exocore.epochs.v1.Query", HandlerType: (*QueryServer)(nil), @@ -401,6 +541,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "CurrentEpoch", Handler: _Query_CurrentEpoch_Handler, }, + { + MethodName: "EpochInfo", + Handler: _Query_EpochInfo_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "exocore/epochs/v1/query.proto", @@ -498,6 +642,77 @@ func (m *QueryEpochsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *QueryEpochInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryEpochInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n4, err4 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.BlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.BlockTime):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintQuery(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x12 + { + size, err := m.Epoch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *QueryCurrentEpochRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -601,6 +816,32 @@ func (m *QueryEpochsInfoResponse) Size() (n int) { return n } +func (m *QueryEpochInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryEpochInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Epoch.Size() + n += 1 + l + sovQuery(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.BlockTime) + n += 1 + l + sovQuery(uint64(l)) + return n +} + func (m *QueryCurrentEpochRequest) Size() (n int) { if m == nil { return 0 @@ -871,6 +1112,204 @@ func (m *QueryEpochsInfoResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryEpochInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryEpochInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Epoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.BlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryCurrentEpochRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/epochs/types/query.pb.gw.go b/x/epochs/types/query.pb.gw.go index e87d592da..4fde8711a 100644 --- a/x/epochs/types/query.pb.gw.go +++ b/x/epochs/types/query.pb.gw.go @@ -123,6 +123,60 @@ func local_request_Query_CurrentEpoch_0(ctx context.Context, marshaler runtime.M } +func request_Query_EpochInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochInfoRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["identifier"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "identifier") + } + + protoReq.Identifier, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "identifier", err) + } + + msg, err := client.EpochInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_EpochInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochInfoRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["identifier"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "identifier") + } + + protoReq.Identifier, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "identifier", err) + } + + msg, err := server.EpochInfo(ctx, &protoReq) + return msg, metadata, err + +} + // 
RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. @@ -175,6 +229,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_EpochInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_EpochInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EpochInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -256,6 +333,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_EpochInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_EpochInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EpochInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -263,10 +360,14 @@ var ( pattern_Query_EpochInfos_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1}, []string{"exocore", "epochs", "v1"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_CurrentEpoch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"exocore", "epochs", "v1", "current_epoch", "identifier"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_EpochInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"exocore", "epochs", "v1", "epoch", "identifier"}, "", runtime.AssumeColonVerbOpt(false))) ) var ( forward_Query_EpochInfos_0 = runtime.ForwardResponseMessage forward_Query_CurrentEpoch_0 = runtime.ForwardResponseMessage + + forward_Query_EpochInfo_0 = runtime.ForwardResponseMessage ) diff --git a/x/evm/keeper/grpc_query.go b/x/evm/keeper/grpc_query.go index 851ce1dd0..518611326 100644 --- a/x/evm/keeper/grpc_query.go +++ b/x/evm/keeper/grpc_query.go @@ -472,6 +472,7 @@ func (k Keeper) TraceTx(c context.Context, req *types.QueryTraceTxRequest) (*typ continue } txConfig.TxHash = ethTx.Hash() + // #nosec G115 txConfig.TxIndex = uint(i) // reset gas meter for each transaction ctx = ctx.WithGasMeter(evmostypes.NewInfiniteGasMeterWithLimit(msg.Gas())) @@ -565,6 +566,7 @@ func (k Keeper) TraceBlock(c context.Context, req *types.QueryTraceBlockRequest) result := types.TxTraceResult{} ethTx := tx.AsTransaction() txConfig.TxHash = ethTx.Hash() + // #nosec G115 txConfig.TxIndex = uint(i) traceResult, logIndex, err := k.traceTx(ctx, cfg, txConfig, signer, ethTx, req.TraceConfig, true, nil) if err != nil { diff --git a/x/exomint/keeper/impl_epochs_hooks.go b/x/exomint/keeper/impl_epochs_hooks.go index 2e17a3081..868bd08d5 100644 --- a/x/exomint/keeper/impl_epochs_hooks.go +++ b/x/exomint/keeper/impl_epochs_hooks.go @@ -69,6 +69,7 @@ func (wrapper EpochsHooksWrapper) AfterEpochEnd( sdk.NewAttribute(sdk.AttributeKeyAmount, mintedCoin.Amount.String()), sdk.NewAttribute(types.AttributeEpochIdentifier, identifier), sdk.NewAttribute(types.AttributeEpochNumber, fmt.Sprintf("%d", number)), + sdk.NewAttribute(types.AttributeDenom, mintedCoin.Denom), ), ) diff --git a/x/exomint/types/events.go b/x/exomint/types/events.go index b0adfca8d..14243d033 100644 --- a/x/exomint/types/events.go +++ b/x/exomint/types/events.go @@ -4,4 +4,5 @@ const ( EventTypeMint = ModuleName AttributeEpochIdentifier = "epoch_identifier" AttributeEpochNumber = "epoch_number" + AttributeDenom = "denom" ) diff --git a/x/feedistribution/keeper/hooks_test.go b/x/feedistribution/keeper/hooks_test.go index 1aaecd799..8337eb6ba 100644 --- a/x/feedistribution/keeper/hooks_test.go +++ b/x/feedistribution/keeper/hooks_test.go @@ -56,6 +56,7 @@ func (suite *KeeperTestSuite) prepare() { FromAddress: operatorAddressString, Info: &operatortypes.OperatorInfo{ EarningsAddr: operatorAddressString, + ApproveAddr: operatorAddressString, Commission: stakingtypes.Commission{ CommissionRates: stakingtypes.CommissionRates{ Rate: sdk.NewDec(1), @@ -103,7 +104,7 @@ func (suite *KeeperTestSuite) prepare() { AssetsAddress: assetAddr.Bytes(), OpAmount: amount, } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) suite.CheckLengthOfValidatorUpdates(0, nil, "deposit but don't delegate") // then delegate it @@ -133,7 +134,7 @@ func (suite 
*KeeperTestSuite) prepare() { AssetsAddress: assetAddr.Bytes(), OpAmount: additionalAmount, } - err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) + _, err = suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParams) suite.NoError(err) suite.CheckLengthOfValidatorUpdates(0, nil, "deposit above min but don't delegate") delegationParams = &delegationtypes.DelegationOrUndelegationParams{ diff --git a/x/operator/client/cli/tx.go b/x/operator/client/cli/tx.go index 0e025cee3..247915d00 100644 --- a/x/operator/client/cli/tx.go +++ b/x/operator/client/cli/tx.go @@ -19,8 +19,6 @@ import ( ) const ( - FlagEarningAddr = "earning-addr" - FlagApproveAddr = "approve-addr" FlagMetaInfo = "meta-info" FlagClientChainData = "client-chain-data" ) @@ -75,16 +73,6 @@ func CmdRegisterOperator() *cobra.Command { } f := cmd.Flags() - // EarningAddr may be different from the sender's address. - f.String( - FlagEarningAddr, "", "The address which is used to receive the earning reward in the Exocore chain. "+ - "If not provided, it will default to the sender's address.", - ) - // ApproveAddr may be different from the sender's address. - f.String( - FlagApproveAddr, "", "The address which is used to approve the delegations made to "+ - "the operator. If not provided, it will default to the sender's address.", - ) // OperatorMetaInfo is the name of the operator. f.String( FlagMetaInfo, "", "The operator's meta info (like name)", @@ -109,22 +97,15 @@ func newBuildRegisterOperatorMsg( clientCtx client.Context, fs *flag.FlagSet, ) (*types.RegisterOperatorReq, error) { sender := clientCtx.GetFromAddress() - // #nosec G703 // this only errors if the flag isn't defined. - approveAddr, _ := fs.GetString(FlagApproveAddr) - if approveAddr == "" { - approveAddr = sender.String() - } - // #nosec G703 // this only errors if the flag isn't defined. - earningAddr, _ := fs.GetString(FlagEarningAddr) - if earningAddr == "" { - earningAddr = sender.String() - } metaInfo, _ := fs.GetString(FlagMetaInfo) + if strings.TrimSpace(metaInfo) == "" { + return nil, errorsmod.Wrap(types.ErrCliCmdInputArg, "meta info must be provided") + } msg := &types.RegisterOperatorReq{ FromAddress: sender.String(), Info: &types.OperatorInfo{ - EarningsAddr: earningAddr, - ApproveAddr: approveAddr, + EarningsAddr: sender.String(), + ApproveAddr: sender.String(), OperatorMetaInfo: metaInfo, }, } diff --git a/x/operator/keeper/consensus_keys.go b/x/operator/keeper/consensus_keys.go index 0149504b8..2795e34c5 100644 --- a/x/operator/keeper/consensus_keys.go +++ b/x/operator/keeper/consensus_keys.go @@ -14,6 +14,7 @@ import ( oracletype "github.com/ExocoreNetwork/exocore/x/oracle/types" "github.com/cometbft/cometbft/libs/log" "github.com/cosmos/cosmos-sdk/store/prefix" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" sdk "github.com/cosmos/cosmos-sdk/types" @@ -67,8 +68,6 @@ func (k *Keeper) setOperatorConsKeyForChainID( if k.IsOperatorRemovingKeyFromChainID(ctx, opAccAddr, chainID) { return types.ErrAlreadyRemovingKey } - // convert to bytes - bz := k.cdc.MustMarshal(wrappedKey.ToTmProtoKey()) consAddr := wrappedKey.ToConsAddr() // check if the provided key is already in use by another operator. such use // also includes whether it was replaced by the same operator. this check ensures @@ -88,16 +87,20 @@ func (k *Keeper) setOperatorConsKeyForChainID( return nil } // if this key is different, we will set the vote power of the old key to 0 - // in the validator update. 
but, we must only do so once in a block, since the + // in the validator update. but, we must only do so once in an epoch, since the // first existing key is the one to replace with 0 vote power and not any others. alreadyRecorded, _ = k.getOperatorPrevConsKeyForChainID(ctx, opAccAddr, chainID) if !alreadyRecorded { + // TODO edge case wherein `key` is not active, so `prevKey` should not be + // recorded. ideally, this should be verified from x/dogfood or x/appchain. + // make a `ConfirmKeyActivation(chainID, consAddr)` function in x/dogfood. + // if IsKeyActive(chainID, prevKey.toConsAddr()) is false, then do not record. k.setOperatorPrevConsKeyForChainID( ctx, opAccAddr, chainID, prevKey, ) } } - k.setOperatorConsKeyForChainIDUnchecked(ctx, opAccAddr, consAddr, chainID, bz) + k.setOperatorConsKeyForChainIDUnchecked(ctx, opAccAddr, chainID, wrappedKey) // only call the hooks if this is not genesis if !genesis { if found { @@ -115,10 +118,11 @@ func (k *Keeper) setOperatorConsKeyForChainID( // no error checking of the input. The caller must do the error checking // and then call this function. func (k Keeper) setOperatorConsKeyForChainIDUnchecked( - ctx sdk.Context, opAccAddr sdk.AccAddress, consAddr sdk.ConsAddress, - chainID string, bz []byte, + ctx sdk.Context, opAccAddr sdk.AccAddress, + chainID string, wrappedKey keytypes.WrappedConsKey, ) { store := ctx.KVStore(k.storeKey) + bz := k.cdc.MustMarshal(wrappedKey.ToTmProtoKey()) // forward lookup // given operator address and chain id, find the consensus key, // since it is sorted by operator address, it helps for faster indexing by operator @@ -137,7 +141,26 @@ func (k Keeper) setOperatorConsKeyForChainIDUnchecked( // prune it once the validator set update id matures (if key replacement). // this pruning will be triggered by the app chain module and will not be // recorded here. - store.Set(types.KeyForChainIDAndConsKeyToOperator(chainID, consAddr), opAccAddr.Bytes()) + consAddr := wrappedKey.ToConsAddr() + store.Set( + types.KeyForChainIDAndConsKeyToOperator(chainID, consAddr), + opAccAddr.Bytes(), + ) + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeSetConsKey, + sdk.NewAttribute(types.AttributeKeyOperator, opAccAddr.String()), + sdk.NewAttribute(types.AttributeKeyChainID, chainID), + // convert to hex-string to avoid bech32-encoding + sdk.NewAttribute( + types.AttributeKeyConsensusAddress, + // non-checksummed version without "0x" prefix + common.Bytes2Hex(consAddr[:]), + ), + sdk.NewAttribute(types.AttributeKeyConsKeyHex, wrappedKey.ToHex()), + ), + ) } // setOperatorPrevConsKeyForChainID sets the previous (consensus) public key for the given @@ -153,6 +176,15 @@ func (k *Keeper) setOperatorPrevConsKeyForChainID( bz := k.cdc.MustMarshal(prevKey.ToTmProtoKey()) store := ctx.KVStore(k.storeKey) store.Set(types.KeyForChainIDAndOperatorToPrevConsKey(chainID, opAccAddr), bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeSetPrevConsKey, + sdk.NewAttribute(types.AttributeKeyOperator, opAccAddr.String()), + sdk.NewAttribute(types.AttributeKeyChainID, chainID), + sdk.NewAttribute(types.AttributeKeyConsensusAddress, prevKey.ToConsAddr().String()), + sdk.NewAttribute(types.AttributeKeyConsKeyHex, prevKey.ToHex()), + ), + ) } // GetOperatorPrevConsKeyForChainID gets the previous (consensus) public key for the given @@ -263,6 +295,16 @@ func (k *Keeper) InitiateOperatorKeyRemovalForChainID( // can only be called if the operator is currently opted in. 
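The key-management paths above now emit typed events such as set_cons_key and set_prev_cons_key. As a rough illustration (not code from this change), a keeper test could confirm that an event of the expected type was emitted by scanning the context's event manager; attribute values are deliberately not inspected here because their representation differs across Cosmos SDK versions:

package keeper_test

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// emittedEvent reports whether an event of the given type, for example
// "set_cons_key", has been emitted on the supplied context.
func emittedEvent(ctx sdk.Context, eventType string) bool {
	for _, ev := range ctx.EventManager().Events() {
		if ev.Type == eventType {
			return true
		}
	}
	return false
}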
store := ctx.KVStore(k.storeKey) store.Set(types.KeyForOperatorKeyRemovalForChainID(opAccAddr, chainID), []byte{}) + // TODO: emit after calling hook? + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeInitRemoveConsKey, + sdk.NewAttribute(types.AttributeKeyOperator, opAccAddr.String()), + sdk.NewAttribute(types.AttributeKeyChainID, chainID), + sdk.NewAttribute(types.AttributeKeyConsensusAddress, key.ToConsAddr().String()), + sdk.NewAttribute(types.AttributeKeyConsKeyHex, key.ToHex()), + ), + ) k.Hooks().AfterOperatorKeyRemovalInitiated(ctx, opAccAddr, chainID, key) } @@ -301,6 +343,15 @@ func (k Keeper) CompleteOperatorKeyRemovalForChainID( store.Delete(types.KeyForChainIDAndOperatorToConsKey(chainID, opAccAddr)) store.Delete(types.KeyForChainIDAndConsKeyToOperator(chainID, consAddr)) store.Delete(types.KeyForOperatorKeyRemovalForChainID(opAccAddr, chainID)) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeEndRemoveConsKey, + sdk.NewAttribute(types.AttributeKeyOperator, opAccAddr.String()), + sdk.NewAttribute(types.AttributeKeyChainID, chainID), + sdk.NewAttribute(types.AttributeKeyConsensusAddress, prevKey.ToConsAddr().String()), + sdk.NewAttribute(types.AttributeKeyConsKeyHex, prevKey.ToHex()), + ), + ) return nil } @@ -455,7 +506,16 @@ func (k Keeper) ClearPreviousConsensusKeys(ctx sdk.Context, chainID string) { defer iterator.Close() for ; iterator.Valid(); iterator.Next() { - store.Delete(iterator.Key()) + key := iterator.Key() + store.Delete(key) + operatorAddr := key[1+len(partialKey):] + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeRemovePrevConsKey, + sdk.NewAttribute(types.AttributeKeyOperator, sdk.AccAddress(operatorAddr).String()), + sdk.NewAttribute(types.AttributeKeyChainID, chainID), + ), + ) } } diff --git a/x/operator/keeper/genesis.go b/x/operator/keeper/genesis.go index 95767199d..ae278805b 100644 --- a/x/operator/keeper/genesis.go +++ b/x/operator/keeper/genesis.go @@ -24,8 +24,9 @@ func (k Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) []abci.Va operatorAddr, _ := sdk.AccAddressFromBech32(addr) for _, detail := range record.Chains { wrappedKey := keytypes.NewWrappedConsKeyFromHex(detail.ConsensusKey) - bz := k.cdc.MustMarshal(wrappedKey.ToTmProtoKey()) - k.setOperatorConsKeyForChainIDUnchecked(ctx, operatorAddr, wrappedKey.ToConsAddr(), detail.ChainID, bz) + k.setOperatorConsKeyForChainIDUnchecked( + ctx, operatorAddr, detail.ChainID, wrappedKey, + ) } } // init the state from the general exporting genesis file diff --git a/x/operator/keeper/grpc_query.go b/x/operator/keeper/grpc_query.go index 5206dd580..b4155ae0c 100644 --- a/x/operator/keeper/grpc_query.go +++ b/x/operator/keeper/grpc_query.go @@ -215,6 +215,7 @@ func (k *Keeper) QueryOperatorSlashInfo(goCtx context.Context, req *types.QueryO slashPrefix := utils.AppendMany(types.KeyPrefixOperatorSlashInfo, assetstype.GetJoinedStoreKeyForPrefix(req.OperatorAddr, strings.ToLower(req.AvsAddress))) store := prefix.NewStore(ctx.KVStore(k.storeKey), slashPrefix) + pageRes, err := query.Paginate(store, req.Pagination, func(key []byte, value []byte) error { ret := &types.OperatorSlashInfo{} // don't use MustUnmarshal to not panic for queries diff --git a/x/operator/keeper/operator.go b/x/operator/keeper/operator.go index 167bb9485..187cbc00e 100644 --- a/x/operator/keeper/operator.go +++ b/x/operator/keeper/operator.go @@ -13,6 +13,7 @@ import ( "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes 
"github.com/cosmos/cosmos-sdk/x/staking/types" assetstype "github.com/ExocoreNetwork/exocore/x/assets/types" operatortypes "github.com/ExocoreNetwork/exocore/x/operator/types" @@ -22,16 +23,27 @@ import ( // There is no current way implemented to delete an operator's registration or edit it. // TODO: implement operator edit function, which should allow editing: // approve address? +// name (meta info) // commission, subject to limits and once within 24 hours. // client chain earnings addresses (maybe append only?) func (k *Keeper) SetOperatorInfo( ctx sdk.Context, addr string, info *operatortypes.OperatorInfo, ) (err error) { + if info == nil { + return errorsmod.Wrap(operatortypes.ErrParameterInvalid, "SetOperatorInfo: operator info is nil") + } // #nosec G703 // already validated in `ValidateBasic` opAccAddr, err := sdk.AccAddressFromBech32(addr) if err != nil { return errorsmod.Wrap(err, "SetOperatorInfo: error occurred when parse acc address from Bech32") } + // already checked that addr is valid, so only check match below + if addr != info.EarningsAddr { + return errorsmod.Wrap(operatortypes.ErrParameterInvalid, "SetOperatorInfo: operator address does not match earnings address") + } + if addr != info.ApproveAddr { + return errorsmod.Wrap(operatortypes.ErrParameterInvalid, "SetOperatorInfo: operator address does not match approve address") + } // if already registered, this request should go to EditOperator. // TODO: EditOperator needs to be implemented. if k.IsOperator(ctx, opAccAddr) { @@ -63,6 +75,22 @@ func (k *Keeper) SetOperatorInfo( store := prefix.NewStore(ctx.KVStore(k.storeKey), operatortypes.KeyPrefixOperatorInfo) bz := k.cdc.MustMarshal(info) store.Set(opAccAddr, bz) + + // TODO validate operator name does not already exist + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeRegisterOperator, + sdk.NewAttribute(operatortypes.AttributeKeyOperator, opAccAddr.String()), + sdk.NewAttribute(operatortypes.AttributeKeyMetaInfo, info.OperatorMetaInfo), + sdk.NewAttribute(stakingtypes.AttributeKeyCommissionRate, info.Commission.Rate.String()), + sdk.NewAttribute(operatortypes.AttributeKeyMaxCommissionRate, info.Commission.MaxRate.String()), + sdk.NewAttribute(operatortypes.AttributeKeyMaxChangeRate, info.Commission.MaxChangeRate.String()), + sdk.NewAttribute(operatortypes.AttributeKeyCommissionUpdateTime, sdk.FormatTimeString(info.Commission.UpdateTime)), + // TODO: add ClientChainEarningsAddr.EarningInfoList to the event + ), + ) + return nil } @@ -125,6 +153,17 @@ func (k *Keeper) HandleOptedInfo(ctx sdk.Context, operatorAddr, avsAddr string, // restore the info after handling bz := k.cdc.MustMarshal(info) store.Set(infoKey, bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeOptInfoUpdated, + sdk.NewAttribute(operatortypes.AttributeKeyOperator, operatorAddr), + sdk.NewAttribute(operatortypes.AttributeKeyAVSAddr, avsAddr), + sdk.NewAttribute(operatortypes.AttributeKeySlashContract, info.SlashContract), + sdk.NewAttribute(operatortypes.AttributeKeyOptInHeight, fmt.Sprintf("%d", info.OptedInHeight)), + sdk.NewAttribute(operatortypes.AttributeKeyOptOutHeight, fmt.Sprintf("%d", info.OptedOutHeight)), + sdk.NewAttribute(operatortypes.AttributeKeyJailed, fmt.Sprintf("%t", info.Jailed)), + ), + ) return nil } diff --git a/x/operator/keeper/operator_info_test.go b/x/operator/keeper/operator_info_test.go index 9913c3393..cc4d65cb2 100644 --- a/x/operator/keeper/operator_info_test.go +++ b/x/operator/keeper/operator_info_test.go @@ -18,7 
+18,7 @@ import ( func (suite *OperatorTestSuite) TestOperatorInfo() { info := &operatortype.OperatorInfo{ EarningsAddr: suite.AccAddress.String(), - ApproveAddr: "", + ApproveAddr: suite.AccAddress.String(), OperatorMetaInfo: "test operator", ClientChainEarningsAddr: &operatortype.ClientChainEarningAddrList{ EarningInfoList: []*operatortype.ClientChainEarningAddrInfo{ @@ -42,6 +42,7 @@ func (suite *OperatorTestSuite) TestAllOperators() { OperatorAddress: suite.AccAddress.String(), OperatorInfo: operatortype.OperatorInfo{ EarningsAddr: suite.AccAddress.String(), + ApproveAddr: suite.AccAddress.String(), OperatorMetaInfo: "testOperator", Commission: stakingtypes.NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()), }, diff --git a/x/operator/keeper/operator_slash_state.go b/x/operator/keeper/operator_slash_state.go index 467a1702b..d47e2cb5e 100644 --- a/x/operator/keeper/operator_slash_state.go +++ b/x/operator/keeper/operator_slash_state.go @@ -48,6 +48,7 @@ func (k *Keeper) UpdateOperatorSlashInfo(ctx sdk.Context, operatorAddr, avsAddr, // save single operator delegation state bz := k.cdc.MustMarshal(&slashInfo) store.Set(slashInfoKey, bz) + // TODO: add an event for the slash info return nil } diff --git a/x/operator/keeper/opt.go b/x/operator/keeper/opt.go index ecf3cc244..b144ec02a 100644 --- a/x/operator/keeper/opt.go +++ b/x/operator/keeper/opt.go @@ -2,6 +2,7 @@ package keeper import ( "errors" + "fmt" errorsmod "cosmossdk.io/errors" sdkmath "cosmossdk.io/math" @@ -91,6 +92,18 @@ func (k *Keeper) OptIn( if err != nil { return err } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeOptIn, + sdk.NewAttribute(types.AttributeKeyOperator, operatorAddress.String()), + sdk.NewAttribute(types.AttributeKeyAVSAddr, avsAddr), + sdk.NewAttribute(types.AttributeKeySlashContract, slashContract), + sdk.NewAttribute(types.AttributeKeyOptInHeight, fmt.Sprintf("%d", optedInfo.OptedInHeight)), + // do not emit the opt out height because the default value is used + ), + ) + return nil } diff --git a/x/operator/keeper/opt_test.go b/x/operator/keeper/opt_test.go index 88cecc04d..8283dd1c5 100644 --- a/x/operator/keeper/opt_test.go +++ b/x/operator/keeper/opt_test.go @@ -32,6 +32,7 @@ func (suite *OperatorTestSuite) registerOperator(operator string) { FromAddress: operator, Info: &operatorTypes.OperatorInfo{ EarningsAddr: operator, + ApproveAddr: operator, }, } _, err := s.OperatorMsgServer.RegisterOperator(s.Ctx, registerReq) @@ -60,7 +61,7 @@ func (suite *OperatorTestSuite) prepareDeposit(stakerAddr, assetAddr common.Addr OpAmount: amount, AssetsAddress: assetAddr[:], } - err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParam) + _, err := suite.App.AssetsKeeper.PerformDepositOrWithdraw(suite.Ctx, depositParam) suite.NoError(err) } diff --git a/x/operator/keeper/slash.go b/x/operator/keeper/slash.go index ec7634117..7eacf1231 100644 --- a/x/operator/keeper/slash.go +++ b/x/operator/keeper/slash.go @@ -1,7 +1,7 @@ package keeper import ( - "strings" + "encoding/json" "github.com/ExocoreNetwork/exocore/utils" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" @@ -16,10 +16,13 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) -// GetSlashIDForDogfood It use infractionType+'_'+'infractionHeight' as the slashID, because /* the slash */event occurs in dogfood doesn't have a TxID. It isn't submitted through an external transaction. 
+// GetSlashIDForDogfood It use infractionType+'_'+'infractionHeight' as the slashID, because /* the slash */event occurs in +// dogfood doesn't have a TxID. It isn't submitted through an external transaction. func GetSlashIDForDogfood(infraction stakingtypes.Infraction, infractionHeight int64) string { - // #nosec G701 - return strings.Join([]string{hexutil.EncodeUint64(uint64(infraction)), hexutil.EncodeUint64(uint64(infractionHeight))}, utils.DelimiterForID) + slashIDBytes := utils.AppendMany( + utils.Uint32ToBigEndian(uint32(infraction)), + sdk.Uint64ToBigEndian(uint64(infractionHeight))) + return hexutil.Encode(slashIDBytes) } // SlashFromUndelegation executes the slash from an undelegation, reduce the .ActualCompletedAmount from undelegationRecords @@ -87,6 +90,7 @@ func (k *Keeper) SlashAssets(ctx sdk.Context, snapshotHeight int64, parameter *t SlashUndelegations: make([]types.SlashFromUndelegation, 0), SlashAssetsPool: make([]types.SlashFromAssetsPool, 0), UndelegationFilterHeight: snapshotHeight, + HistoricalVotingPower: parameter.Power, } // slash from the unbonding stakers if parameter.SlashEventHeight < ctx.BlockHeight() { @@ -95,6 +99,17 @@ func (k *Keeper) SlashAssets(ctx sdk.Context, snapshotHeight int64, parameter *t slashFromUndelegation := SlashFromUndelegation(undelegation, newSlashProportion) if slashFromUndelegation != nil { executionInfo.SlashUndelegations = append(executionInfo.SlashUndelegations, *slashFromUndelegation) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeUndelegationSlashed, + sdk.NewAttribute(types.AttributeKeyRecordID, hexutil.Encode(undelegation.GetKey())), + // amount left after slashing has been performed + sdk.NewAttribute(types.AttributeKeyAmount, undelegation.ActualCompletedAmount.String()), + // slashed quantity + sdk.NewAttribute(types.AttributeKeySlashAmount, slashFromUndelegation.Amount.String()), + ), + ) + } return nil } @@ -106,8 +121,9 @@ func (k *Keeper) SlashAssets(ctx sdk.Context, snapshotHeight int64, parameter *t } } - // slash from the assets pool of the operator + // slash from the assets pool of the operator, emits operator asset info status event. opFuncToIterateAssets := func(assetID string, state *assetstype.OperatorAssetInfo) error { + // iterate over each operator + asset and reduce the total amount by the slash amount slashAmount := newSlashProportion.MulInt(state.TotalAmount).TruncateInt() remainingAmount := state.TotalAmount.Sub(slashAmount) // todo: consider slash all assets if the remaining amount is too small, @@ -116,8 +132,7 @@ func (k *Keeper) SlashAssets(ctx sdk.Context, snapshotHeight int64, parameter *t // all shares need to be cleared if the asset amount is slashed to zero, // otherwise there will be a problem in updating the shares when handling // the new delegations. 
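GetSlashIDForDogfood, reworked earlier in this file's diff, now hex-encodes a fixed-width binary identifier, a 4-byte big-endian infraction type followed by an 8-byte big-endian block height, instead of joining two separately hex-encoded integers. A standard-library sketch of the resulting format, assuming utils.Uint32ToBigEndian mirrors the SDK's Uint64ToBigEndian with a 4-byte output, and using made-up inputs:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	// hypothetical inputs: infraction type 1 at height 42
	infraction := uint32(1)
	height := uint64(42)

	id := make([]byte, 0, 12)
	id = binary.BigEndian.AppendUint32(id, infraction)
	id = binary.BigEndian.AppendUint64(id, height)

	// hexutil.Encode in the diff adds the "0x" prefix to the same bytes
	fmt.Println("0x" + hex.EncodeToString(id)) // 0x00000001000000000000002a
}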
- if remainingAmount.IsZero() && - k.delegationKeeper.HasStakerList(ctx, parameter.Operator.String(), assetID) { + if remainingAmount.IsZero() && k.delegationKeeper.HasStakerList(ctx, parameter.Operator.String(), assetID) { // clear the share of other stakers stakerList, err := k.delegationKeeper.GetStakersByOperator(ctx, parameter.Operator.String(), assetID) if err != nil { @@ -135,11 +150,20 @@ func (k *Keeper) SlashAssets(ctx sdk.Context, snapshotHeight int64, parameter *t state.OperatorShare = sdkmath.LegacyZeroDec() } state.TotalAmount = remainingAmount - // TODO: check if pendingUndelegation also zero => delete this item, and this operator should be opted out if all aasets falls to 0 since the miniself is not satisfied then. + // TODO: check if pendingUndelegation also zero => delete this item, and this operator should be opted out if + // all assets falls to 0 since the miniself is not satisfied then. executionInfo.SlashAssetsPool = append(executionInfo.SlashAssetsPool, types.SlashFromAssetsPool{ AssetID: assetID, Amount: slashAmount, }) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeOperatorAssetSlashed, + sdk.NewAttribute(types.AttributeKeyOperator, parameter.Operator.String()), + sdk.NewAttribute(types.AttributeKeyAssetID, assetID), + sdk.NewAttribute(types.AttributeKeyAmount, slashAmount.String()), + ), + ) return nil } err = k.assetsKeeper.IterateAssetsForOperator(ctx, true, parameter.Operator.String(), nil, opFuncToIterateAssets) @@ -155,10 +179,15 @@ func (k *Keeper) Slash(ctx sdk.Context, parameter *types.SlashInputInfo) error { if err != nil { return err } - snapshotKeyLastHeight, snapshot, err := k.LoadVotingPowerSnapshot(ctx, parameter.AVSAddr, parameter.SlashEventHeight) + slashEventEpochStartHeight, snapshot, err := k.LoadVotingPowerSnapshot(ctx, parameter.AVSAddr, parameter.SlashEventHeight) if err != nil { return err } + k.Logger(ctx).Info("execute slashing", "eventHeight", parameter.SlashEventHeight, "avsAddr", parameter.AVSAddr, "operator", parameter.Operator, "slashID", parameter.SlashID, "slashType", parameter.SlashType) + // Marshal the snapshot to improve the user experience when printing the voting power decimal through the logger + // so we don't have to address the error here. + snapshotJSON, _ := json.Marshal(snapshot) + k.Logger(ctx).Info("the voting power snapshot info is:", "filter_height", slashEventEpochStartHeight, "snapshot", string(snapshotJSON)) // get the historical voting power from the snapshot for the other AVSs if !parameter.IsDogFood { votingPower := types.GetSpecifiedVotingPower(parameter.Operator.String(), snapshot.OperatorVotingPowers) @@ -178,7 +207,7 @@ func (k *Keeper) Slash(ctx sdk.Context, parameter *types.SlashInputInfo) error { // slash assets according to the input information // using cache context to ensure the atomicity of slash execution. 
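The Slash path below runs SlashAssets against a cached context, so its state writes and events only land if the whole slash succeeds. Independent of this module, the Cosmos SDK idiom looks roughly like the following generic sketch (runAtomically is a made-up helper name):

package keeper

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// runAtomically branches the store, runs op against the branch, and commits
// the branch back to the parent context only when op returns without error.
func runAtomically(ctx sdk.Context, op func(sdk.Context) error) error {
	cc, writeFn := ctx.CacheContext()
	if err := op(cc); err != nil {
		return err // the branched writes are simply discarded
	}
	writeFn() // commit the branched writes back to ctx
	return nil
}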
cc, writeFunc := ctx.CacheContext() - executionInfo, err := k.SlashAssets(cc, snapshotKeyLastHeight, parameter) + executionInfo, err := k.SlashAssets(cc, slashEventEpochStartHeight, parameter) if err != nil { return err } diff --git a/x/operator/keeper/usd_value.go b/x/operator/keeper/usd_value.go index f16ebc068..5d83ac325 100644 --- a/x/operator/keeper/usd_value.go +++ b/x/operator/keeper/usd_value.go @@ -22,6 +22,7 @@ import ( // The key and value that will be changed is: // AVSAddr + '/' + operatorAddr -> types.OperatorOptedUSDValue (the total USD share of specified operator and Avs) // This function will be called when some assets supported by Avs are delegated/undelegated or slashed. +// Currently this function is only called during tests. func (k *Keeper) UpdateOperatorUSDValue(ctx sdk.Context, avsAddr, operatorAddr string, delta operatortypes.DeltaOperatorUSDInfo) error { store := prefix.NewStore(ctx.KVStore(k.storeKey), operatortypes.KeyPrefixUSDValueForOperator) var key []byte @@ -54,6 +55,17 @@ func (k *Keeper) UpdateOperatorUSDValue(ctx sdk.Context, avsAddr, operatorAddr s } bz := k.cdc.MustMarshal(&usdInfo) store.Set(key, bz) + // emit an event even though this is only used for testing right now + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeUpdateOperatorUSDValue, + sdk.NewAttribute(operatortypes.AttributeKeyOperator, operatorAddr), + sdk.NewAttribute(operatortypes.AttributeKeyAVSAddr, avsAddr), + sdk.NewAttribute(operatortypes.AttributeKeySelfUSDValue, usdInfo.SelfUSDValue.String()), + sdk.NewAttribute(operatortypes.AttributeKeyTotalUSDValue, usdInfo.TotalUSDValue.String()), + sdk.NewAttribute(operatortypes.AttributeKeyActiveUSDValue, usdInfo.ActiveUSDValue.String()), + ), + ) return nil } @@ -74,6 +86,7 @@ func (k *Keeper) InitOperatorUSDValue(ctx sdk.Context, avsAddr, operatorAddr str } bz := k.cdc.MustMarshal(&initValue) store.Set(key, bz) + // no need to emit event here because DEFAULT 0 in indexer return nil } @@ -90,7 +103,13 @@ func (k *Keeper) DeleteOperatorUSDValue(ctx sdk.Context, avsAddr, operatorAddr s } key = assetstype.GetJoinedStoreKey(strings.ToLower(avsAddr), operatorAddr) store.Delete(key) - + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeDeleteOperatorUSDValue, + sdk.NewAttribute(operatortypes.AttributeKeyOperator, operatorAddr), + sdk.NewAttribute(operatortypes.AttributeKeyAVSAddr, avsAddr), + ), + ) return nil } @@ -100,7 +119,18 @@ func (k *Keeper) DeleteAllOperatorsUSDValueForAVS(ctx sdk.Context, avsAddr strin defer iterator.Close() for ; iterator.Valid(); iterator.Next() { + parsed, err := assetstype.ParseJoinedStoreKey(iterator.Key(), 2) + if err != nil { + return err + } store.Delete(iterator.Key()) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeDeleteOperatorUSDValue, + sdk.NewAttribute(operatortypes.AttributeKeyOperator, parsed[1]), + sdk.NewAttribute(operatortypes.AttributeKeyAVSAddr, avsAddr), + ), + ) } return nil } @@ -143,6 +173,7 @@ func (k *Keeper) GetOperatorOptedUSDValue(ctx sdk.Context, avsAddr, operatorAddr // This function will be called when some assets of operator supported by the specified Avs // are delegated/undelegated or slashed. Additionally, when an operator opts out of // the Avs, this function also will be called. +// Currently not used. 
func (k *Keeper) UpdateAVSUSDValue(ctx sdk.Context, avsAddr string, opAmount sdkmath.LegacyDec) error { if opAmount.IsNil() || opAmount.IsZero() { return errorsmod.Wrap(operatortypes.ErrValueIsNilOrZero, fmt.Sprintf("UpdateAVSUSDValue the opAmount is:%v", opAmount)) @@ -161,6 +192,13 @@ func (k *Keeper) UpdateAVSUSDValue(ctx sdk.Context, avsAddr string, opAmount sdk } bz := k.cdc.MustMarshal(&totalValue) store.Set(key, bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeUpdateAVSUSDValue, + sdk.NewAttribute(operatortypes.AttributeKeyAVSAddr, avsAddr), + sdk.NewAttribute(operatortypes.AttributeKeyTotalUSDValue, totalValue.Amount.String()), + ), + ) return nil } @@ -174,6 +212,13 @@ func (k *Keeper) SetAVSUSDValue(ctx sdk.Context, avsAddr string, amount sdkmath. setValue := operatortypes.DecValueField{Amount: amount} bz := k.cdc.MustMarshal(&setValue) store.Set(key, bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeUpdateAVSUSDValue, + sdk.NewAttribute(operatortypes.AttributeKeyAVSAddr, avsAddr), + sdk.NewAttribute(operatortypes.AttributeKeyTotalUSDValue, amount.String()), + ), + ) return nil } @@ -181,6 +226,12 @@ func (k *Keeper) DeleteAVSUSDValue(ctx sdk.Context, avsAddr string) error { store := prefix.NewStore(ctx.KVStore(k.storeKey), operatortypes.KeyPrefixUSDValueForAVS) key := []byte(strings.ToLower(avsAddr)) store.Delete(key) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeDeleteAVSUSDValue, + sdk.NewAttribute(operatortypes.AttributeKeyAVSAddr, avsAddr), + ), + ) return nil } @@ -224,6 +275,16 @@ func (k *Keeper) IterateOperatorsForAVS(ctx sdk.Context, avsAddr string, isUpdat if isUpdate { bz := k.cdc.MustMarshal(&optedUSDValues) store.Set(iterator.Key(), bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeUpdateOperatorUSDValue, + sdk.NewAttribute(operatortypes.AttributeKeyOperator, keys[1]), + sdk.NewAttribute(operatortypes.AttributeKeyAVSAddr, avsAddr), + sdk.NewAttribute(operatortypes.AttributeKeySelfUSDValue, optedUSDValues.SelfUSDValue.String()), + sdk.NewAttribute(operatortypes.AttributeKeyTotalUSDValue, optedUSDValues.TotalUSDValue.String()), + sdk.NewAttribute(operatortypes.AttributeKeyActiveUSDValue, optedUSDValues.ActiveUSDValue.String()), + ), + ) } } return nil @@ -317,6 +378,13 @@ func (k *Keeper) IterateAVSUSDValues(ctx sdk.Context, isUpdate bool, opFunc func if isUpdate { bz := k.cdc.MustMarshal(&usdValue) store.Set(iterator.Key(), bz) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + operatortypes.EventTypeUpdateAVSUSDValue, + sdk.NewAttribute(operatortypes.AttributeKeyAVSAddr, string(iterator.Key())), + sdk.NewAttribute(operatortypes.AttributeKeyTotalUSDValue, usdValue.Amount.String()), + ), + ) } } return nil @@ -390,7 +458,9 @@ func (k *Keeper) CalculateUSDValueForOperator( return err } decimal = assetInfo.AssetBasicInfo.Decimals - ret.StakingAndWaitUnbonding = ret.StakingAndWaitUnbonding.Add(CalculateUSDValue(state.TotalAmount.Add(state.PendingUndelegationAmount), price.Value, decimal, price.Decimal)) + usdValue := CalculateUSDValue(state.TotalAmount.Add(state.PendingUndelegationAmount), price.Value, decimal, price.Decimal) + ctx.Logger().Info("CalculateUSDValueForOperator: get price for slash", "assetID", assetID, "assetDecimal", decimal, "price", price, "totalAmount", state.TotalAmount, "pendingUndelegationAmount", state.PendingUndelegationAmount, "StakingAndWaitUnbonding", ret.StakingAndWaitUnbonding, "addUSDValue", usdValue) + 
ret.StakingAndWaitUnbonding.AddMut(usdValue) } else { if prices == nil { return errorsmod.Wrap(operatortypes.ErrValueIsNilOrZero, "CalculateUSDValueForOperator prices map is nil") @@ -403,13 +473,13 @@ func (k *Keeper) CalculateUSDValueForOperator( if !ok { return errorsmod.Wrap(operatortypes.ErrKeyNotExistInMap, "CalculateUSDValueForOperator map: decimals, key: assetID") } - ret.Staking = ret.Staking.Add(CalculateUSDValue(state.TotalAmount, price.Value, decimal, price.Decimal)) + ret.Staking.AddMut(CalculateUSDValue(state.TotalAmount, price.Value, decimal, price.Decimal)) // calculate the token amount from the share for the operator selfAmount, err := delegationkeeper.TokensFromShares(state.OperatorShare, state.TotalShare, state.TotalAmount) if err != nil { return err } - ret.SelfStaking = ret.SelfStaking.Add(CalculateUSDValue(selfAmount, price.Value, decimal, price.Decimal)) + ret.SelfStaking.AddMut(CalculateUSDValue(selfAmount, price.Value, decimal, price.Decimal)) } return nil } diff --git a/x/operator/types/events.go b/x/operator/types/events.go new file mode 100644 index 000000000..b0e78c158 --- /dev/null +++ b/x/operator/types/events.go @@ -0,0 +1,53 @@ +package types + +// DONTCOVER + +// x/operator events +const ( + EventTypeRegisterOperator = "register_operator" + AttributeKeyOperator = "operator" + AttributeKeyMetaInfo = "meta_info" + AttributeKeyMaxCommissionRate = "max_commission_rate" + AttributeKeyMaxChangeRate = "max_change_rate" + AttributeKeyCommissionUpdateTime = "commission_update_time" + + EventTypeOptIn = "opt_in" + AttributeKeyAVSAddr = "avs_addr" + AttributeKeySlashContract = "slash_contract" + AttributeKeyOptInHeight = "opt_in_height" + AttributeKeyOptOutHeight = "opt_out_height" + AttributeKeyJailed = "jailed" + + EventTypeOptInfoUpdated = "update_opt_info" + + EventTypeSetConsKey = "set_cons_key" + AttributeKeyChainID = "chain_id" + AttributeKeyConsensusAddress = "consensus_address" + AttributeKeyConsKeyHex = "cons_key_hex" + + EventTypeInitRemoveConsKey = "init_remove_cons_key" + + EventTypeEndRemoveConsKey = "end_remove_cons_key" + + EventTypeSetPrevConsKey = "set_prev_cons_key" + EventTypeRemovePrevConsKey = "remove_prev_cons_key" + + EventTypeUpdateOperatorUSDValue = "update_operator_usd_value" + AttributeKeySelfUSDValue = "self_usd_value" + AttributeKeyTotalUSDValue = "total_usd_value" + AttributeKeyActiveUSDValue = "active_usd_value" + + EventTypeDeleteOperatorUSDValue = "delete_operator_usd_value" + + EventTypeUpdateAVSUSDValue = "update_avs_usd_value" + + EventTypeDeleteAVSUSDValue = "delete_avs_usd_value" + + EventTypeUndelegationSlashed = "undelegation_slashed" + AttributeKeyRecordID = "record_id" + AttributeKeyAmount = "amount" + AttributeKeySlashAmount = "slash_amount" + + EventTypeOperatorAssetSlashed = "operator_asset_slashed" + AttributeKeyAssetID = "asset_id" +) diff --git a/x/operator/types/keys.go b/x/operator/types/keys.go index ad63c5de5..24b0da57a 100644 --- a/x/operator/types/keys.go +++ b/x/operator/types/keys.go @@ -157,7 +157,7 @@ func KeyForVotingPowerSnapshot(avs common.Address, height int64) []byte { return utils.AppendMany( avs.Bytes(), // Append the height - sdk.Uint64ToBigEndian(uint64(height)), + sdk.Uint64ToBigEndian(uint64(height)), // #nosec G115 // height is not negative ) } diff --git a/x/operator/types/tx.go b/x/operator/types/tx.go new file mode 100644 index 000000000..0a62a30b9 --- /dev/null +++ b/x/operator/types/tx.go @@ -0,0 +1,13 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" +) + +// interface 
guard +var _ codec.ProtoMarshaler = &DecValueField{} + +// String implements the Stringer interface for DecValueField. +func (d *DecValueField) String() string { + return d.Amount.String() +} diff --git a/x/operator/types/tx.pb.go b/x/operator/types/tx.pb.go index 9e00f26ea..28b8fbb16 100644 --- a/x/operator/types/tx.pb.go +++ b/x/operator/types/tx.pb.go @@ -74,9 +74,8 @@ type DecValueField struct { Amount github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,1,opt,name=amount,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"amount"` } -func (m *DecValueField) Reset() { *m = DecValueField{} } -func (m *DecValueField) String() string { return proto.CompactTextString(m) } -func (*DecValueField) ProtoMessage() {} +func (m *DecValueField) Reset() { *m = DecValueField{} } +func (*DecValueField) ProtoMessage() {} func (*DecValueField) Descriptor() ([]byte, []int) { return fileDescriptor_b229d5663e4df167, []int{0} } @@ -762,6 +761,8 @@ type SlashExecutionInfo struct { SlashAssetsPool []SlashFromAssetsPool `protobuf:"bytes,4,rep,name=slash_assets_pool,json=slashAssetsPool,proto3" json:"slash_assets_pool"` // undelegation_filter_height records the height before which undelegations are not slashed UndelegationFilterHeight int64 `protobuf:"varint,5,opt,name=undelegation_filter_height,json=undelegationFilterHeight,proto3" json:"undelegation_filter_height,omitempty"` + // the historical voting power at the time of the slash event. + HistoricalVotingPower int64 `protobuf:"varint,6,opt,name=historical_voting_power,json=historicalVotingPower,proto3" json:"historical_voting_power,omitempty"` } func (m *SlashExecutionInfo) Reset() { *m = SlashExecutionInfo{} } @@ -818,6 +819,13 @@ func (m *SlashExecutionInfo) GetUndelegationFilterHeight() int64 { return 0 } +func (m *SlashExecutionInfo) GetHistoricalVotingPower() int64 { + if m != nil { + return m.HistoricalVotingPower + } + return 0 +} + // OperatorSlashInfo is the slash info of operator type OperatorSlashInfo struct { // slash_contract is the address of slash contract @@ -1262,118 +1270,120 @@ func init() { func init() { proto.RegisterFile("exocore/operator/v1/tx.proto", fileDescriptor_b229d5663e4df167) } var fileDescriptor_b229d5663e4df167 = []byte{ - // 1769 bytes of a gzipped FileDescriptorProto + // 1794 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x52, 0xb2, 0x2d, 0x3e, 0x92, 0x22, 0x35, 0xf2, 0x07, 0xcd, 0xba, 0xa2, 0xbd, 0xa9, - 0x6d, 0x59, 0xad, 0x48, 0x58, 0x69, 0x0a, 0xc4, 0x4d, 0x81, 0xea, 0x13, 0x66, 0x23, 0x93, 0xc2, - 0x52, 0x32, 0xd0, 0x14, 0xed, 0x76, 0x45, 0x8e, 0x96, 0x6b, 0x2f, 0x77, 0xb6, 0x3b, 0x43, 0x46, - 0xca, 0xa9, 0xc8, 0x29, 0x28, 0x7a, 0x28, 0x90, 0x53, 0x81, 0x1e, 0x7c, 0x2a, 0x7a, 0xf4, 0x21, - 0x3d, 0x16, 0x6d, 0x6f, 0x39, 0x06, 0x6e, 0x0f, 0x6d, 0x0f, 0x4a, 0x21, 0x17, 0x70, 0xff, 0x88, - 0x16, 0x28, 0xe6, 0x63, 0xa9, 0xa1, 0xbd, 0x94, 0x2d, 0x48, 0xe9, 0x25, 0xf1, 0xbe, 0xf7, 0xe6, - 0xfd, 0x7e, 0xef, 0xcd, 0xfb, 0x18, 0x0a, 0xae, 0xe1, 0x3d, 0xd2, 0x22, 0x11, 0xae, 0x92, 0x10, - 0x47, 0x0e, 0x23, 0x51, 0xb5, 0x7f, 0xb7, 0xca, 0xf6, 0x2a, 0x61, 0x44, 0x18, 0x41, 0x33, 0x4a, - 0x5b, 0x89, 0xb5, 0x95, 0xfe, 0xdd, 0xd2, 0xb4, 0xd3, 0xf5, 0x02, 0x52, 0x15, 0xff, 0x95, 0x76, - 0xa5, 0x2b, 0x2d, 0x42, 0xbb, 0x84, 0x56, 0xbb, 0xd4, 0xe5, 0xe7, 0xbb, 0xd4, 0x55, 0x8a, 0x6f, - 0x28, 0x05, 0x65, 0xce, 0x63, 0x2f, 0xe0, 0xca, 0x1d, 0xcc, 0x9c, 0xbb, 0xf1, 0xb7, 0xb2, 0xba, - 0x2a, 0xad, 0x6c, 0xf1, 0x55, 0x95, 
0x1f, 0x4a, 0x75, 0xd1, 0x25, 0x2e, 0x91, 0x72, 0xfe, 0x2f, - 0x25, 0xbd, 0xe6, 0x12, 0xe2, 0xfa, 0xb8, 0xea, 0x84, 0x5e, 0xd5, 0x09, 0x02, 0xc2, 0x1c, 0xe6, - 0x91, 0x40, 0x9d, 0x31, 0x31, 0xe4, 0x56, 0x71, 0xeb, 0xa1, 0xe3, 0xf7, 0xf0, 0xba, 0x87, 0xfd, - 0x36, 0xda, 0x82, 0xf3, 0x4e, 0x97, 0xf4, 0x02, 0x56, 0x34, 0xae, 0x1b, 0x73, 0xe9, 0xe5, 0xf7, - 0x3e, 0x3f, 0x28, 0x8f, 0xfd, 0xe3, 0xa0, 0x7c, 0xcb, 0xf5, 0x58, 0xa7, 0xb7, 0x53, 0x69, 0x91, - 0xae, 0x42, 0x55, 0xff, 0x5b, 0xa0, 0xed, 0xc7, 0x55, 0xb6, 0x1f, 0x62, 0x5a, 0x59, 0xc5, 0xad, - 0x67, 0x9f, 0x2d, 0x80, 0x22, 0xb5, 0x8a, 0x5b, 0x96, 0xf2, 0x65, 0xfe, 0x27, 0x05, 0x97, 0x1a, - 0x2a, 0x2f, 0x8d, 0x90, 0xe1, 0xf6, 0x76, 0x73, 0x55, 0x80, 0xa2, 0x08, 0xa6, 0x28, 0xf6, 0x77, - 0xed, 0x1e, 0x6d, 0xdb, 0x7d, 0x2e, 0x51, 0xb8, 0x1b, 0x27, 0xc3, 0x3d, 0x3c, 0x28, 0x67, 0x9b, - 0xd8, 0xdf, 0x8d, 0xfd, 0xbe, 0xc4, 0x23, 0xcb, 0x31, 0xb6, 0x69, 0x5b, 0x62, 0xf6, 0x20, 0xcf, - 0x08, 0x73, 0x7c, 0x0d, 0x34, 0x25, 0x40, 0x1f, 0x9c, 0x18, 0x34, 0xb7, 0xc5, 0x1d, 0x8d, 0x40, - 0xcd, 0x09, 0x94, 0x01, 0xec, 0x1e, 0x14, 0x9c, 0x16, 0xf3, 0xfa, 0x58, 0xc3, 0x1d, 0x17, 0xb8, - 0xf5, 0x13, 0xe3, 0x4e, 0x2d, 0x09, 0x4f, 0x23, 0x80, 0xa7, 0x24, 0x4e, 0x8c, 0x6c, 0xfe, 0xde, - 0x80, 0x99, 0x38, 0xfd, 0x0f, 0x09, 0xf3, 0x02, 0x77, 0x93, 0x7c, 0x88, 0x23, 0xf4, 0x3d, 0xc8, - 0xc5, 0xd5, 0x6a, 0x3b, 0xed, 0x76, 0xa4, 0x72, 0x5f, 0x7c, 0xf6, 0xd9, 0xc2, 0x45, 0xe5, 0x6e, - 0xa9, 0xdd, 0x8e, 0x30, 0xa5, 0x4d, 0x16, 0x79, 0x81, 0x6b, 0x65, 0x63, 0x73, 0x2e, 0x46, 0x36, - 0x64, 0xfb, 0xc2, 0x9b, 0x1d, 0x72, 0x77, 0x2a, 0x89, 0xa7, 0xab, 0x98, 0x4c, 0xff, 0x88, 0x9f, - 0xf9, 0x65, 0x0a, 0x66, 0x34, 0xbe, 0xcd, 0xc0, 0x09, 0x69, 0x87, 0x30, 0xf4, 0x08, 0x90, 0xbc, - 0xc0, 0x21, 0xf8, 0xb3, 0x28, 0xd8, 0x82, 0xf0, 0xab, 0xe7, 0xe8, 0x27, 0x70, 0x79, 0x90, 0x23, - 0x1d, 0x8e, 0x16, 0x53, 0xd7, 0xc7, 0xe7, 0x32, 0x8b, 0x73, 0x95, 0x84, 0xc6, 0xaf, 0x24, 0x64, - 0xdb, 0xba, 0x48, 0x5e, 0x15, 0x52, 0x54, 0x81, 0x19, 0xdf, 0xa1, 0xcc, 0x6e, 0x75, 0x9c, 0xc0, - 0xc5, 0x6d, 0xbb, 0x83, 0x3d, 0xb7, 0xc3, 0x44, 0x61, 0x8c, 0x5b, 0xd3, 0x5c, 0xb5, 0x22, 0x35, - 0xf7, 0x85, 0x02, 0xdd, 0x81, 0x02, 0x0e, 0x49, 0xab, 0x63, 0x7b, 0x6d, 0x1c, 0x30, 0x6f, 0xd7, - 0xc3, 0x51, 0x71, 0x82, 0x47, 0x6e, 0xe5, 0x85, 0xbc, 0x36, 0x10, 0xa3, 0x1b, 0x90, 0x95, 0xa6, - 0x41, 0xaf, 0xbb, 0x83, 0xa3, 0xe2, 0x39, 0xe1, 0x33, 0x23, 0x64, 0x75, 0x21, 0x32, 0x7f, 0x0a, - 0x53, 0x71, 0x56, 0xef, 0x63, 0x3f, 0xc4, 0xd1, 0x28, 0x3e, 0xc6, 0x28, 0x3e, 0xb3, 0x90, 0xe9, - 0x38, 0xd4, 0x26, 0x21, 0xb3, 0x49, 0x8f, 0x89, 0x1a, 0x98, 0xb4, 0xd2, 0x1d, 0x87, 0x36, 0x42, - 0xd6, 0xe8, 0x31, 0x73, 0x1f, 0x4a, 0x2b, 0xbe, 0x87, 0x03, 0x7e, 0xcc, 0x0b, 0xd6, 0x9c, 0x28, - 0xf0, 0x02, 0x97, 0x97, 0xcf, 0x86, 0x47, 0x19, 0xfa, 0x11, 0x4c, 0x63, 0x29, 0xb2, 0xbd, 0x60, - 0x97, 0xd8, 0xbe, 0x47, 0x39, 0x16, 0x4f, 0x6c, 0x35, 0x31, 0xb1, 0xc9, 0xbe, 0x6a, 0xc1, 0x2e, - 0xb1, 0xf2, 0xca, 0x13, 0xff, 0xe0, 0xce, 0xcd, 0x5f, 0x1b, 0xa3, 0xb0, 0xb9, 0x09, 0xfa, 0x3e, - 0x20, 0xff, 0x23, 0xbb, 0x25, 0x0c, 0x78, 0xb8, 0x5e, 0x60, 0x7b, 0x6d, 0x11, 0xe8, 0xc4, 0xf2, - 0xcc, 0xe1, 0x41, 0x39, 0xbf, 0xf1, 0x91, 0x76, 0xba, 0xb6, 0x6a, 0xe5, 0xfd, 0x21, 0x41, 0x1b, - 0xbd, 0x0b, 0x57, 0x87, 0x8e, 0xc7, 0xa1, 0x88, 0x5e, 0x12, 0xdd, 0x60, 0x5d, 0x6e, 0x25, 0x12, - 0x30, 0xff, 0x9c, 0x82, 0x6c, 0x5c, 0x24, 0x82, 0xcd, 0x5b, 0x90, 0x53, 0xc7, 0xa9, 0xd6, 0x8b, - 0x56, 0x36, 0x16, 0x8a, 0x8e, 0xbb, 0x01, 0x59, 0x27, 0x0c, 0x23, 0xd2, 0xc7, 0x3a, 0x46, 0x46, - 0xc9, 0x84, 0xc9, 0xb7, 0x00, 0x0d, 0xea, 0xb5, 0x8b, 0x99, 
0x23, 0xf2, 0x2a, 0xe7, 0x8c, 0x55, - 0x88, 0x35, 0x0f, 0x30, 0x73, 0x04, 0xaa, 0x0f, 0xa5, 0xa4, 0x08, 0x14, 0x05, 0x5e, 0x57, 0x27, - 0xbb, 0x08, 0x9e, 0x77, 0xeb, 0xca, 0xab, 0x31, 0x4b, 0xfa, 0x0f, 0x00, 0x5a, 0xa4, 0xdb, 0xf5, - 0x28, 0xf5, 0x48, 0x20, 0xca, 0x31, 0xb3, 0x68, 0x56, 0x54, 0xfb, 0xc5, 0x7b, 0x4e, 0xed, 0xbd, - 0xca, 0xca, 0xc0, 0x72, 0x39, 0xcd, 0x7b, 0xfa, 0x77, 0x2f, 0x9e, 0xce, 0x1b, 0x96, 0xe6, 0xc0, - 0xfc, 0x8d, 0x01, 0x69, 0xb1, 0x4d, 0x44, 0x28, 0x37, 0x61, 0x8a, 0xfa, 0x0e, 0xed, 0xd8, 0x2d, - 0x12, 0xb0, 0xc8, 0x69, 0xa9, 0x0d, 0x66, 0xe5, 0x84, 0x74, 0x45, 0x09, 0xd1, 0x2d, 0xc8, 0x13, - 0x7e, 0xc6, 0xf6, 0x82, 0xb8, 0xb6, 0x79, 0x16, 0x27, 0xac, 0x1c, 0x91, 0xae, 0x54, 0x5d, 0xcf, - 0x41, 0x41, 0xda, 0x91, 0x1e, 0xd3, 0x9b, 0x72, 0xc2, 0x9a, 0x12, 0xf2, 0x46, 0x8f, 0x29, 0xcb, - 0xcb, 0x70, 0xfe, 0x91, 0xe3, 0xf9, 0xb8, 0x2d, 0xf2, 0x35, 0x69, 0xa9, 0x2f, 0xf3, 0x0f, 0x06, - 0x4c, 0x2b, 0x7a, 0x4b, 0x94, 0x62, 0xd6, 0x64, 0x0e, 0xc3, 0xa7, 0x5a, 0xb0, 0xb5, 0x80, 0x69, - 0xf3, 0xaa, 0x16, 0xb0, 0x78, 0xc1, 0x22, 0x0b, 0xce, 0xe9, 0x8b, 0xec, 0x74, 0x43, 0x50, 0xba, - 0x32, 0xff, 0x64, 0xc0, 0xa5, 0x26, 0xcf, 0xdd, 0x7a, 0x44, 0xba, 0xdb, 0x41, 0x1b, 0xfb, 0xd8, - 0x15, 0x8f, 0x07, 0x74, 0x07, 0xd2, 0xfc, 0xb6, 0x70, 0x14, 0x37, 0x4c, 0x7a, 0x39, 0x7b, 0x78, - 0x50, 0x9e, 0x6c, 0x0a, 0x61, 0x6d, 0xd5, 0x9a, 0x94, 0xea, 0x5a, 0x1b, 0xdd, 0x82, 0x49, 0x87, - 0x07, 0xcf, 0x2d, 0x25, 0xb7, 0xcc, 0xe1, 0x41, 0xf9, 0x82, 0x48, 0x48, 0x6d, 0xd5, 0xba, 0x20, - 0x94, 0x35, 0xfd, 0xdd, 0x31, 0x7e, 0x76, 0x69, 0x31, 0x3f, 0x35, 0x60, 0x66, 0x10, 0x82, 0xc0, - 0xa4, 0x9b, 0x84, 0xf8, 0x43, 0xac, 0x8c, 0x37, 0x62, 0x95, 0x3a, 0x43, 0x56, 0x7f, 0x1f, 0x07, - 0x24, 0x58, 0xad, 0xed, 0xe1, 0x56, 0x8f, 0x67, 0x54, 0x14, 0xb0, 0x0b, 0x05, 0x59, 0xc0, 0x61, - 0x44, 0x42, 0x12, 0x71, 0xf9, 0x99, 0xec, 0xb4, 0xbc, 0xf0, 0xba, 0x39, 0x70, 0x8a, 0x7e, 0x0c, - 0x19, 0x09, 0x74, 0x76, 0x25, 0x03, 0xc2, 0xa1, 0x7c, 0xe7, 0x38, 0x30, 0x23, 0xdd, 0xf7, 0xb4, - 0x9a, 0xa1, 0xc5, 0x71, 0x31, 0xd5, 0xe7, 0x13, 0x87, 0x49, 0x62, 0x99, 0x2d, 0x4f, 0x70, 0x4a, - 0x16, 0x12, 0xce, 0x74, 0x05, 0x45, 0x1f, 0xc0, 0xb4, 0x84, 0x10, 0x17, 0x45, 0xed, 0x90, 0x10, - 0xbf, 0x38, 0x71, 0xcc, 0x3e, 0x4e, 0x28, 0x02, 0xe5, 0x5e, 0x66, 0x47, 0xab, 0x8d, 0xf7, 0xa0, - 0xa4, 0x13, 0xb7, 0x77, 0x3d, 0x9f, 0xe1, 0x28, 0x1e, 0x01, 0x72, 0x87, 0x16, 0x75, 0x8b, 0x75, - 0x61, 0x20, 0x87, 0x81, 0xf9, 0xdf, 0x14, 0x6f, 0x7a, 0x09, 0x2c, 0x40, 0x4f, 0x32, 0x9b, 0xee, - 0x40, 0x81, 0xf6, 0x76, 0xba, 0x1e, 0x63, 0x47, 0x8b, 0x37, 0x25, 0x00, 0xf3, 0x03, 0xb9, 0x1a, - 0x3a, 0x7c, 0xb7, 0xf7, 0xf9, 0xdc, 0x1e, 0x7a, 0x2f, 0x64, 0x84, 0x4c, 0x99, 0x7c, 0x0d, 0xd2, - 0x1e, 0xb5, 0xfb, 0x98, 0x91, 0xc1, 0x68, 0x9a, 0xf4, 0xe8, 0x43, 0xf1, 0x9d, 0x58, 0x6c, 0xe7, - 0xbe, 0x8a, 0x62, 0xfb, 0x3a, 0xc8, 0xda, 0xb0, 0xf9, 0x89, 0xe2, 0xf9, 0xeb, 0xc6, 0x5c, 0xce, - 0x4a, 0x0b, 0xc9, 0xd6, 0x7e, 0x88, 0x51, 0x1d, 0xa6, 0x70, 0xdc, 0x05, 0x72, 0x55, 0x5d, 0x10, - 0x6b, 0xe1, 0xf6, 0xe8, 0x6b, 0x1c, 0xea, 0x1a, 0x2b, 0x87, 0xf5, 0x4f, 0xf3, 0x8f, 0x06, 0xcc, - 0x58, 0xd8, 0xf5, 0x28, 0xc3, 0x51, 0x7c, 0x0f, 0x16, 0xfe, 0x19, 0xfa, 0x2e, 0x64, 0x77, 0x23, - 0xd2, 0x15, 0x7b, 0x0d, 0x53, 0xfa, 0xda, 0x97, 0x6e, 0x86, 0x5b, 0x2b, 0x11, 0x7a, 0x07, 0x26, - 0x04, 0xb5, 0x94, 0xa0, 0x76, 0xe3, 0xd8, 0x17, 0x9f, 0x20, 0x25, 0xcc, 0xef, 0x7d, 0xfb, 0x93, - 0x27, 0xe5, 0xb1, 0x7f, 0x3f, 0x29, 0x8f, 0x7d, 0xfc, 0xe2, 0xe9, 0x7c, 0x66, 0xfd, 0xc8, 0xe1, - 0x2f, 0x5e, 0x3c, 0x9d, 0xbf, 0xa2, 0x25, 0x53, 0x3f, 0x6b, 0x96, 0xa0, 0xf8, 0x6a, 
0x00, 0x34, - 0x24, 0x01, 0xc5, 0xe6, 0x97, 0x06, 0xe4, 0x1a, 0x21, 0xab, 0x05, 0x8c, 0x2c, 0x3d, 0x6c, 0x9e, - 0x3a, 0xae, 0x32, 0x64, 0x9c, 0x3e, 0x1d, 0x9c, 0x95, 0xaf, 0x09, 0x70, 0xfa, 0x34, 0x36, 0x78, - 0x17, 0xf2, 0x61, 0x6f, 0xc7, 0xf7, 0x5a, 0xf6, 0x63, 0xbc, 0x6f, 0x3f, 0xa2, 0x24, 0x50, 0xe3, - 0x79, 0x9a, 0xff, 0xf6, 0xd9, 0x14, 0xaa, 0xf7, 0xf1, 0xfe, 0x0f, 0x9a, 0x8d, 0xba, 0x95, 0x0b, - 0x07, 0x9f, 0x94, 0x04, 0xf7, 0xde, 0x39, 0x2e, 0xf8, 0xe2, 0x50, 0xf0, 0x5a, 0x3c, 0xe6, 0x45, - 0x40, 0xba, 0x40, 0xc5, 0xfd, 0x5b, 0x03, 0xa6, 0xe4, 0x7b, 0xb2, 0xb1, 0xfb, 0xff, 0x08, 0xfc, - 0xde, 0x77, 0x8e, 0x63, 0x7f, 0x75, 0x98, 0xbd, 0xc6, 0xca, 0xbc, 0xc4, 0x7f, 0x68, 0x69, 0x12, - 0xc5, 0xff, 0x99, 0x01, 0xb9, 0x26, 0x66, 0x2b, 0x24, 0xa0, 0xef, 0xe3, 0x7d, 0x4e, 0x7f, 0x11, - 0x2e, 0xbc, 0x29, 0xf3, 0xd8, 0xf0, 0x2b, 0xbd, 0xae, 0xbb, 0x7a, 0xc0, 0x31, 0xe2, 0xcb, 0x57, - 0x35, 0x14, 0x02, 0xbf, 0x2a, 0x5d, 0x20, 0x43, 0x9d, 0xf7, 0x21, 0xdd, 0x1c, 0x74, 0x77, 0x09, - 0x2e, 0x37, 0x37, 0x96, 0x9a, 0xf7, 0xed, 0xad, 0x1f, 0x6e, 0xae, 0xd9, 0xdb, 0xf5, 0xe6, 0xe6, - 0xda, 0x4a, 0x6d, 0xbd, 0xb6, 0xb6, 0x5a, 0x18, 0x43, 0xd7, 0xa0, 0xa8, 0xe9, 0x6a, 0xf5, 0xe6, - 0xd6, 0x52, 0x7d, 0xcb, 0x16, 0xa2, 0x82, 0x81, 0x6e, 0xc2, 0x0d, 0x4d, 0x5b, 0x6f, 0xc4, 0x06, - 0x4b, 0xf5, 0xb5, 0xc6, 0x76, 0x53, 0x99, 0xa5, 0x16, 0xff, 0x3a, 0x01, 0xe3, 0x0f, 0xa8, 0x8b, - 0x9e, 0x18, 0x50, 0x78, 0xb9, 0x6b, 0x50, 0xf2, 0x2a, 0x48, 0x98, 0x0e, 0xa5, 0x85, 0x37, 0xb4, - 0x54, 0xd7, 0xf9, 0xf6, 0xc7, 0x7f, 0xf9, 0xd7, 0xa7, 0xa9, 0x05, 0xf3, 0x9b, 0xd5, 0xe4, 0x3f, - 0x09, 0x55, 0x93, 0x26, 0xd0, 0x27, 0x06, 0xc0, 0x51, 0xbe, 0x90, 0x99, 0x3c, 0xe0, 0xf4, 0x0c, - 0x97, 0x6e, 0xbf, 0xd6, 0x46, 0x11, 0x5a, 0x10, 0x84, 0x6e, 0x9b, 0x37, 0x47, 0x11, 0x1a, 0x2e, - 0x3e, 0x4e, 0xe5, 0xa8, 0xcb, 0x46, 0x50, 0x19, 0xea, 0xcb, 0x11, 0x54, 0x12, 0x5a, 0xf5, 0xb5, - 0x54, 0x86, 0xe7, 0xd7, 0x2f, 0x0d, 0xc8, 0x68, 0x1d, 0x83, 0xde, 0x1a, 0x85, 0xa3, 0x75, 0x59, - 0x69, 0xee, 0xf5, 0x46, 0x8a, 0x4d, 0x45, 0xb0, 0x99, 0x33, 0x6f, 0x1d, 0xc3, 0x46, 0xf7, 0x7c, - 0xee, 0xe7, 0xfc, 0x57, 0xc6, 0xf2, 0xc6, 0xe7, 0x87, 0xb3, 0xc6, 0x17, 0x87, 0xb3, 0xc6, 0x3f, - 0x0f, 0x67, 0x8d, 0x5f, 0x3d, 0x9f, 0x1d, 0xfb, 0xe2, 0xf9, 0xec, 0xd8, 0xdf, 0x9e, 0xcf, 0x8e, - 0x7d, 0xb0, 0xa8, 0x6d, 0xc5, 0x35, 0xe9, 0xb2, 0x8e, 0xd9, 0x87, 0x24, 0x7a, 0x3c, 0x40, 0xd8, - 0x3b, 0xc2, 0x10, 0x5b, 0x72, 0xe7, 0xbc, 0xf8, 0x5b, 0xdb, 0xdb, 0xff, 0x0b, 0x00, 0x00, 0xff, - 0xff, 0x9d, 0x68, 0xf9, 0xb8, 0x41, 0x14, 0x00, 0x00, + 0x15, 0xd7, 0x52, 0xb4, 0x2d, 0x3e, 0x92, 0x22, 0x35, 0xf2, 0x07, 0xcd, 0xba, 0x62, 0xbc, 0xa9, + 0x6d, 0x59, 0xad, 0x48, 0x58, 0x69, 0x02, 0xc4, 0x4d, 0x81, 0xea, 0x13, 0x66, 0x23, 0x93, 0xc2, + 0x52, 0x32, 0xd0, 0x14, 0xed, 0x76, 0xb5, 0x1c, 0x92, 0x63, 0x2d, 0x77, 0xb6, 0x3b, 0x43, 0x46, + 0xca, 0xa9, 0xc8, 0x29, 0x28, 0x7a, 0x68, 0x91, 0x4b, 0x0b, 0xf4, 0xe0, 0x53, 0xd1, 0xa3, 0x0f, + 0xe9, 0xb1, 0x68, 0x7b, 0xcb, 0x31, 0x70, 0x7b, 0x28, 0x7a, 0x50, 0x0a, 0xb9, 0x80, 0xfb, 0x47, + 0xb4, 0x40, 0x31, 0xb3, 0xb3, 0xd4, 0xd0, 0x26, 0x65, 0x0b, 0x56, 0x7a, 0x49, 0xbc, 0xef, 0xbd, + 0x79, 0xbf, 0xdf, 0x7b, 0xf3, 0x3e, 0x86, 0x82, 0x6b, 0x78, 0x9f, 0xba, 0x34, 0xc4, 0x15, 0x1a, + 0xe0, 0xd0, 0xe1, 0x34, 0xac, 0xf4, 0xef, 0x54, 0xf8, 0x7e, 0x39, 0x08, 0x29, 0xa7, 0x68, 0x56, + 0x69, 0xcb, 0xb1, 0xb6, 0xdc, 0xbf, 0x53, 0x9c, 0x71, 0xba, 0xc4, 0xa7, 0x15, 0xf9, 0xdf, 0xc8, + 0xae, 0x78, 0xc5, 0xa5, 0xac, 0x4b, 0x59, 0xa5, 0xcb, 0xda, 0xe2, 0x7c, 0x97, 0xb5, 0x95, 0xe2, + 0x1b, 0x4a, 0xc1, 0xb8, 0xb3, 0x47, 0x7c, 0xa1, 0xdc, 
0xc5, 0xdc, 0xb9, 0x13, 0x7f, 0x2b, 0xab, + 0xab, 0x91, 0x95, 0x2d, 0xbf, 0x2a, 0xd1, 0x87, 0x52, 0x5d, 0x6c, 0xd3, 0x36, 0x8d, 0xe4, 0xe2, + 0x5f, 0x4a, 0x7a, 0xad, 0x4d, 0x69, 0xdb, 0xc3, 0x15, 0x27, 0x20, 0x15, 0xc7, 0xf7, 0x29, 0x77, + 0x38, 0xa1, 0xbe, 0x3a, 0x63, 0xee, 0x41, 0x76, 0x0d, 0xbb, 0x0f, 0x1c, 0xaf, 0x87, 0x37, 0x08, + 0xf6, 0x9a, 0x68, 0x1b, 0xce, 0x3b, 0x5d, 0xda, 0xf3, 0x79, 0xc1, 0x78, 0xc3, 0x98, 0x4f, 0xad, + 0xbc, 0xf7, 0xf9, 0x61, 0x69, 0xe2, 0x1f, 0x87, 0xa5, 0x9b, 0x6d, 0xc2, 0x3b, 0xbd, 0xdd, 0xb2, + 0x4b, 0xbb, 0x0a, 0x55, 0xfd, 0x6f, 0x91, 0x35, 0xf7, 0x2a, 0xfc, 0x20, 0xc0, 0xac, 0xbc, 0x86, + 0xdd, 0x27, 0x9f, 0x2d, 0x82, 0x22, 0xb5, 0x86, 0x5d, 0x4b, 0xf9, 0xba, 0x9b, 0xfc, 0xf5, 0xa3, + 0xd2, 0x84, 0xf9, 0x9f, 0x04, 0x5c, 0xaa, 0xab, 0xec, 0xd4, 0x03, 0x8e, 0x9b, 0x3b, 0x8d, 0x35, + 0x09, 0x8d, 0x42, 0x98, 0x66, 0xd8, 0x6b, 0xd9, 0x3d, 0xd6, 0xb4, 0xfb, 0x42, 0xa2, 0xd0, 0x37, + 0x4f, 0x87, 0x7e, 0x74, 0x58, 0xca, 0x34, 0xb0, 0xd7, 0x8a, 0xfd, 0x3e, 0xc7, 0x26, 0x23, 0x30, + 0x76, 0x58, 0x33, 0xc2, 0xec, 0x41, 0x8e, 0x53, 0xee, 0x78, 0x1a, 0x68, 0x42, 0x82, 0xde, 0x3f, + 0x35, 0x68, 0x76, 0x5b, 0x38, 0x1a, 0x83, 0x9a, 0x95, 0x28, 0x03, 0xd8, 0x7d, 0xc8, 0x3b, 0x2e, + 0x27, 0x7d, 0xac, 0xe1, 0x4e, 0x4a, 0xdc, 0xda, 0xa9, 0x71, 0xa7, 0x97, 0xa5, 0xa7, 0x31, 0xc0, + 0xd3, 0x11, 0x4e, 0x8c, 0x6c, 0xfe, 0xc1, 0x80, 0xd9, 0x38, 0xfd, 0x0f, 0x28, 0x27, 0x7e, 0x7b, + 0x8b, 0x7e, 0x88, 0x43, 0xf4, 0x5d, 0xc8, 0xc6, 0x35, 0x6b, 0x3b, 0xcd, 0x66, 0xa8, 0x72, 0x5f, + 0x78, 0xf2, 0xd9, 0xe2, 0x45, 0xe5, 0x6e, 0xb9, 0xd9, 0x0c, 0x31, 0x63, 0x0d, 0x1e, 0x12, 0xbf, + 0x6d, 0x65, 0x62, 0x73, 0x21, 0x46, 0x36, 0x64, 0xfa, 0xd2, 0x9b, 0x1d, 0x08, 0x77, 0x2a, 0x89, + 0xaf, 0x57, 0x37, 0xe9, 0xfe, 0x31, 0x3f, 0xf3, 0xcb, 0x04, 0xcc, 0x6a, 0x7c, 0x1b, 0xbe, 0x13, + 0xb0, 0x0e, 0xe5, 0xe8, 0x21, 0xa0, 0xe8, 0x02, 0x87, 0xe0, 0xcf, 0xa2, 0x6c, 0xf3, 0xd2, 0xaf, + 0x9e, 0xa3, 0x1f, 0xc3, 0xe5, 0x41, 0x8e, 0x74, 0x38, 0x56, 0x48, 0xbc, 0x31, 0x39, 0x9f, 0x5e, + 0x9a, 0x2f, 0x8f, 0x68, 0xff, 0xf2, 0x88, 0x6c, 0x5b, 0x17, 0xe9, 0x8b, 0x42, 0x86, 0xca, 0x30, + 0xeb, 0x39, 0x8c, 0xdb, 0x6e, 0xc7, 0xf1, 0xdb, 0xb8, 0x69, 0x77, 0x30, 0x69, 0x77, 0xb8, 0x2c, + 0x8c, 0x49, 0x6b, 0x46, 0xa8, 0x56, 0x23, 0xcd, 0x3d, 0xa9, 0x40, 0xb7, 0x21, 0x8f, 0x03, 0xea, + 0x76, 0x6c, 0xd2, 0xc4, 0x3e, 0x27, 0x2d, 0x82, 0xc3, 0x42, 0x52, 0x44, 0x6e, 0xe5, 0xa4, 0xbc, + 0x3a, 0x10, 0xa3, 0xeb, 0x90, 0x89, 0x4c, 0xfd, 0x5e, 0x77, 0x17, 0x87, 0x85, 0x73, 0xd2, 0x67, + 0x5a, 0xca, 0x6a, 0x52, 0x64, 0xfe, 0x04, 0xa6, 0xe3, 0xac, 0xde, 0xc3, 0x5e, 0x80, 0xc3, 0x71, + 0x7c, 0x8c, 0x71, 0x7c, 0xe6, 0x20, 0xdd, 0x71, 0x98, 0x4d, 0x03, 0x6e, 0xd3, 0x1e, 0x97, 0x35, + 0x30, 0x65, 0xa5, 0x3a, 0x0e, 0xab, 0x07, 0xbc, 0xde, 0xe3, 0xe6, 0x01, 0x14, 0x57, 0x3d, 0x82, + 0x7d, 0x71, 0x8c, 0xf8, 0xeb, 0x4e, 0xe8, 0x13, 0xbf, 0x2d, 0xca, 0x67, 0x93, 0x30, 0x8e, 0x7e, + 0x08, 0x33, 0x38, 0x12, 0xd9, 0xc4, 0x6f, 0x51, 0xdb, 0x23, 0x4c, 0x60, 0x89, 0xc4, 0x56, 0x46, + 0x26, 0x76, 0xb4, 0xaf, 0xaa, 0xdf, 0xa2, 0x56, 0x4e, 0x79, 0x12, 0x1f, 0xc2, 0xb9, 0xf9, 0x1b, + 0x63, 0x1c, 0xb6, 0x30, 0x41, 0xdf, 0x03, 0xe4, 0x7d, 0x64, 0xbb, 0xd2, 0x40, 0x84, 0x4b, 0x7c, + 0x9b, 0x34, 0x65, 0xa0, 0xc9, 0x95, 0xd9, 0xa3, 0xc3, 0x52, 0x6e, 0xf3, 0x23, 0xed, 0x74, 0x75, + 0xcd, 0xca, 0x79, 0x43, 0x82, 0x26, 0x7a, 0x17, 0xae, 0x0e, 0x1d, 0x8f, 0x43, 0x91, 0xbd, 0x24, + 0xbb, 0xc1, 0xba, 0xec, 0x8e, 0x24, 0x60, 0xfe, 0x25, 0x01, 0x99, 0xb8, 0x48, 0x24, 0x9b, 0x37, + 0x21, 0xab, 0x8e, 0x33, 0xad, 0x17, 0xad, 0x4c, 0x2c, 0x94, 0x1d, 0x77, 0x1d, 
0x32, 0x4e, 0x10, + 0x84, 0xb4, 0x8f, 0x75, 0x8c, 0xb4, 0x92, 0x49, 0x93, 0x6f, 0x01, 0x1a, 0xd4, 0x6b, 0x17, 0x73, + 0x47, 0xe6, 0x35, 0x9a, 0x33, 0x56, 0x3e, 0xd6, 0xdc, 0xc7, 0xdc, 0x91, 0xa8, 0x1e, 0x14, 0x47, + 0x45, 0xa0, 0x28, 0x88, 0xba, 0x3a, 0xdd, 0x45, 0x88, 0xbc, 0x5b, 0x57, 0x5e, 0x8c, 0x39, 0xa2, + 0x7f, 0x1f, 0xc0, 0xa5, 0xdd, 0x2e, 0x61, 0x8c, 0x50, 0x5f, 0x96, 0x63, 0x7a, 0xc9, 0x2c, 0xab, + 0xf6, 0x8b, 0xb7, 0x9d, 0xda, 0x7e, 0xe5, 0xd5, 0x81, 0xe5, 0x4a, 0x4a, 0xf4, 0xf4, 0xef, 0x9f, + 0x3d, 0x5e, 0x30, 0x2c, 0xcd, 0x81, 0xf9, 0x5b, 0x03, 0x52, 0x72, 0x9b, 0xc8, 0x50, 0x6e, 0xc0, + 0x34, 0xf3, 0x1c, 0xd6, 0xb1, 0x5d, 0xea, 0xf3, 0xd0, 0x71, 0xd5, 0x1e, 0xb3, 0xb2, 0x52, 0xba, + 0xaa, 0x84, 0xe8, 0x26, 0xe4, 0xa8, 0x38, 0x63, 0x13, 0x3f, 0xae, 0x6d, 0x91, 0xc5, 0xa4, 0x95, + 0xa5, 0x91, 0x2b, 0x55, 0xd7, 0xf3, 0x90, 0x8f, 0xec, 0x68, 0x8f, 0xeb, 0x4d, 0x99, 0xb4, 0xa6, + 0xa5, 0xbc, 0xde, 0xe3, 0xca, 0xf2, 0x32, 0x9c, 0x7f, 0xe8, 0x10, 0x0f, 0x37, 0x65, 0xbe, 0xa6, + 0x2c, 0xf5, 0x65, 0xfe, 0xd1, 0x80, 0x19, 0x45, 0x6f, 0x99, 0x31, 0xcc, 0x1b, 0xdc, 0xe1, 0xf8, + 0xb5, 0xd6, 0x6c, 0xd5, 0xe7, 0xda, 0xbc, 0xaa, 0xfa, 0x3c, 0x5e, 0xb3, 0xc8, 0x82, 0x73, 0xfa, + 0x22, 0x7b, 0xbd, 0x21, 0x18, 0xb9, 0x32, 0xff, 0x6c, 0xc0, 0xa5, 0x86, 0xc8, 0xdd, 0x46, 0x48, + 0xbb, 0x3b, 0x7e, 0x13, 0x7b, 0xb8, 0x2d, 0x9f, 0x10, 0xe8, 0x36, 0xa4, 0xc4, 0x6d, 0xe1, 0x30, + 0x6e, 0x98, 0xd4, 0x4a, 0xe6, 0xe8, 0xb0, 0x34, 0xd5, 0x90, 0xc2, 0xea, 0x9a, 0x35, 0x15, 0xa9, + 0xab, 0x4d, 0x74, 0x13, 0xa6, 0x1c, 0x11, 0xbc, 0xb0, 0x8c, 0xb8, 0xa5, 0x8f, 0x0e, 0x4b, 0x17, + 0x64, 0x42, 0xaa, 0x6b, 0xd6, 0x05, 0xa9, 0xac, 0xea, 0xaf, 0x8f, 0xc9, 0xb3, 0x4b, 0x8b, 0xf9, + 0xa9, 0x01, 0xb3, 0x83, 0x10, 0x24, 0x26, 0xdb, 0xa2, 0xd4, 0x1b, 0x62, 0x65, 0xbc, 0x12, 0xab, + 0xc4, 0x19, 0xb2, 0xfa, 0x55, 0x12, 0x90, 0x64, 0xb5, 0xbe, 0x8f, 0xdd, 0x9e, 0xc8, 0xa8, 0x2c, + 0xe0, 0x36, 0xe4, 0xa3, 0x02, 0x0e, 0x42, 0x1a, 0xd0, 0x50, 0xc8, 0xcf, 0x64, 0xa7, 0xe5, 0xa4, + 0xd7, 0xad, 0x81, 0x53, 0xf4, 0x23, 0x48, 0x47, 0x40, 0x67, 0x57, 0x32, 0x20, 0x1d, 0x46, 0xef, + 0x1c, 0x07, 0x66, 0x23, 0xf7, 0x3d, 0xad, 0x66, 0x58, 0x61, 0x52, 0x4e, 0xf5, 0x85, 0x91, 0xc3, + 0x64, 0x64, 0x99, 0xad, 0x24, 0x05, 0x25, 0x0b, 0x49, 0x67, 0xba, 0x82, 0xa1, 0x0f, 0x60, 0x26, + 0x82, 0x90, 0x17, 0xc5, 0xec, 0x80, 0x52, 0xaf, 0x90, 0x3c, 0x61, 0x1f, 0x8f, 0x28, 0x02, 0xe5, + 0x3e, 0xca, 0x8e, 0x56, 0x1b, 0xef, 0x41, 0x51, 0x27, 0x6e, 0xb7, 0x88, 0xc7, 0x71, 0x18, 0x8f, + 0x80, 0x68, 0x87, 0x16, 0x74, 0x8b, 0x0d, 0x69, 0xa0, 0x86, 0xc1, 0x3b, 0x70, 0xa5, 0x43, 0x18, + 0xa7, 0x21, 0x71, 0x9f, 0x7f, 0x9f, 0x9c, 0x97, 0x47, 0x2f, 0x1d, 0xab, 0xb5, 0x77, 0x80, 0xf9, + 0xdf, 0x84, 0x18, 0x16, 0x11, 0x61, 0x49, 0xf6, 0x34, 0x33, 0xed, 0x36, 0xe4, 0x59, 0x6f, 0xb7, + 0x4b, 0x38, 0x3f, 0x5e, 0xd8, 0x09, 0x89, 0x96, 0x1b, 0xc8, 0x15, 0x3f, 0xf1, 0x26, 0xe8, 0x8b, + 0x79, 0x3f, 0xf4, 0xce, 0x48, 0x4b, 0x99, 0x32, 0xf9, 0x1a, 0xa4, 0x08, 0xb3, 0xfb, 0x98, 0xd3, + 0xc1, 0x48, 0x9b, 0x22, 0xec, 0x81, 0xfc, 0x1e, 0x59, 0xa4, 0xe7, 0xbe, 0x8a, 0x22, 0xfd, 0x3a, + 0x44, 0x35, 0x65, 0x8b, 0x13, 0x32, 0x77, 0x59, 0x2b, 0x25, 0x25, 0xdb, 0x07, 0x01, 0x46, 0x35, + 0x98, 0xc6, 0x71, 0xf7, 0x44, 0x2b, 0xee, 0x82, 0x5c, 0x27, 0xb7, 0xc6, 0x5f, 0xff, 0x50, 0xb7, + 0x59, 0x59, 0xac, 0x7f, 0x9a, 0x7f, 0x32, 0x60, 0xd6, 0xc2, 0x6d, 0xc2, 0x38, 0x0e, 0xe3, 0x7b, + 0xb0, 0xf0, 0x4f, 0xd1, 0x77, 0x20, 0xd3, 0x0a, 0x69, 0x57, 0xee, 0x43, 0xcc, 0xd8, 0x4b, 0x5f, + 0xc8, 0x69, 0x61, 0xad, 0x44, 0xe8, 0x6d, 0x48, 0x4a, 0x6a, 0x09, 0x49, 0xed, 0xfa, 0x89, 0x2f, + 0x45, 
0x49, 0x4a, 0x9a, 0xdf, 0xfd, 0xf6, 0x27, 0x8f, 0x4a, 0x13, 0xff, 0x7e, 0x54, 0x9a, 0xf8, + 0xf8, 0xd9, 0xe3, 0x85, 0xf4, 0xc6, 0xb1, 0xc3, 0x9f, 0x3f, 0x7b, 0xbc, 0x70, 0x45, 0x4b, 0xa6, + 0x7e, 0xd6, 0x2c, 0x42, 0xe1, 0xc5, 0x00, 0x58, 0x40, 0x7d, 0x86, 0xcd, 0x2f, 0x0d, 0xc8, 0xd6, + 0x03, 0x5e, 0xf5, 0x39, 0x5d, 0x7e, 0xd0, 0x78, 0xed, 0xb8, 0x4a, 0x90, 0x76, 0xfa, 0x6c, 0x70, + 0x36, 0x7a, 0x85, 0x80, 0xd3, 0x67, 0xb1, 0xc1, 0xbb, 0x90, 0x0b, 0x7a, 0xbb, 0x1e, 0x71, 0xed, + 0x3d, 0x7c, 0x60, 0x3f, 0x64, 0xd4, 0x57, 0x63, 0x7d, 0x46, 0xfc, 0x66, 0xda, 0x92, 0xaa, 0xf7, + 0xf1, 0xc1, 0xf7, 0x1b, 0xf5, 0x9a, 0x95, 0x0d, 0x06, 0x9f, 0x8c, 0xfa, 0x77, 0xdf, 0x3e, 0x29, + 0xf8, 0xc2, 0x50, 0xf0, 0x5a, 0x3c, 0xe6, 0x45, 0x40, 0xba, 0x40, 0xc5, 0xfd, 0x3b, 0x03, 0xa6, + 0xa3, 0x77, 0x68, 0xbd, 0xf5, 0xff, 0x08, 0xfc, 0xee, 0x3b, 0x27, 0xb1, 0xbf, 0x3a, 0xcc, 0x5e, + 0x63, 0x65, 0x5e, 0x12, 0x3f, 0xd0, 0x34, 0x89, 0xe2, 0xff, 0xc4, 0x80, 0x6c, 0x03, 0xf3, 0x55, + 0xea, 0xb3, 0xf7, 0xf1, 0x81, 0xa0, 0xbf, 0x04, 0x17, 0x5e, 0x95, 0x79, 0x6c, 0xf8, 0x95, 0x5e, + 0xd7, 0x1d, 0x3d, 0xe0, 0x18, 0xf1, 0xf9, 0xab, 0x1a, 0x0a, 0x41, 0x5c, 0x95, 0x2e, 0x88, 0x42, + 0x5d, 0xf0, 0x20, 0xd5, 0x18, 0x74, 0x77, 0x11, 0x2e, 0x37, 0x36, 0x97, 0x1b, 0xf7, 0xec, 0xed, + 0x1f, 0x6c, 0xad, 0xdb, 0x3b, 0xb5, 0xc6, 0xd6, 0xfa, 0x6a, 0x75, 0xa3, 0xba, 0xbe, 0x96, 0x9f, + 0x40, 0xd7, 0xa0, 0xa0, 0xe9, 0xaa, 0xb5, 0xc6, 0xf6, 0x72, 0x6d, 0xdb, 0x96, 0xa2, 0xbc, 0x81, + 0x6e, 0xc0, 0x75, 0x4d, 0x5b, 0xab, 0xc7, 0x06, 0xcb, 0xb5, 0xf5, 0xfa, 0x4e, 0x43, 0x99, 0x25, + 0x96, 0xfe, 0x96, 0x84, 0xc9, 0xfb, 0xac, 0x8d, 0x1e, 0x19, 0x90, 0x7f, 0xbe, 0x6b, 0xd0, 0xe8, + 0x15, 0x32, 0x62, 0x3a, 0x14, 0x17, 0x5f, 0xd1, 0x52, 0x5d, 0xe7, 0x5b, 0x1f, 0xff, 0xf5, 0x5f, + 0x9f, 0x26, 0x16, 0xcd, 0x6f, 0x56, 0x46, 0xff, 0x41, 0xa9, 0x32, 0x6a, 0x02, 0x7d, 0x62, 0x00, + 0x1c, 0xe7, 0x0b, 0x99, 0xa3, 0x07, 0x9c, 0x9e, 0xe1, 0xe2, 0xad, 0x97, 0xda, 0x28, 0x42, 0x8b, + 0x92, 0xd0, 0x2d, 0xf3, 0xc6, 0x38, 0x42, 0xc3, 0xc5, 0x27, 0xa8, 0x1c, 0x77, 0xd9, 0x18, 0x2a, + 0x43, 0x7d, 0x39, 0x86, 0xca, 0x88, 0x56, 0x7d, 0x29, 0x95, 0xe1, 0xf9, 0xf5, 0x0b, 0x03, 0xd2, + 0x5a, 0xc7, 0xa0, 0x37, 0xc7, 0xe1, 0x68, 0x5d, 0x56, 0x9c, 0x7f, 0xb9, 0x91, 0x62, 0x53, 0x96, + 0x6c, 0xe6, 0xcd, 0x9b, 0x27, 0xb0, 0xd1, 0x3d, 0x9f, 0xfb, 0x99, 0xf8, 0x75, 0xb2, 0xb2, 0xf9, + 0xf9, 0xd1, 0x9c, 0xf1, 0xc5, 0xd1, 0x9c, 0xf1, 0xcf, 0xa3, 0x39, 0xe3, 0x97, 0x4f, 0xe7, 0x26, + 0xbe, 0x78, 0x3a, 0x37, 0xf1, 0xf7, 0xa7, 0x73, 0x13, 0x1f, 0x2c, 0x69, 0x5b, 0x71, 0x3d, 0x72, + 0x59, 0xc3, 0xfc, 0x43, 0x1a, 0xee, 0x0d, 0x10, 0xf6, 0x8f, 0x31, 0xe4, 0x96, 0xdc, 0x3d, 0x2f, + 0xff, 0x52, 0xf7, 0xd6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x90, 0x6a, 0x65, 0xdc, 0x7f, 0x14, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
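In the marshalling hunk that follows, the new historical_voting_power field is written with the literal tag byte 0x30. That constant follows directly from the protobuf wire format, which packs the field number and wire type into one varint: (6 << 3) | 0 = 48 = 0x30 for field number 6 with varint encoding. A quick standalone check:

package main

import "fmt"

func main() {
	const fieldNumber = 6    // historical_voting_power
	const wireTypeVarint = 0 // varint wire type
	tag := fieldNumber<<3 | wireTypeVarint
	fmt.Printf("0x%x\n", tag) // prints 0x30, matching MarshalToSizedBuffer below
}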
@@ -2140,6 +2150,11 @@ func (m *SlashExecutionInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.HistoricalVotingPower != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.HistoricalVotingPower)) + i-- + dAtA[i] = 0x30 + } if m.UndelegationFilterHeight != 0 { i = encodeVarintTx(dAtA, i, uint64(m.UndelegationFilterHeight)) i-- @@ -2778,6 +2793,9 @@ func (m *SlashExecutionInfo) Size() (n int) { if m.UndelegationFilterHeight != 0 { n += 1 + sovTx(uint64(m.UndelegationFilterHeight)) } + if m.HistoricalVotingPower != 0 { + n += 1 + sovTx(uint64(m.HistoricalVotingPower)) + } return n } @@ -4665,6 +4683,25 @@ func (m *SlashExecutionInfo) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HistoricalVotingPower", wireType) + } + m.HistoricalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HistoricalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTx(dAtA[iNdEx:]) diff --git a/x/operator/types/validator.go b/x/operator/types/validator.go index 142b60731..e803d96c8 100644 --- a/x/operator/types/validator.go +++ b/x/operator/types/validator.go @@ -21,6 +21,7 @@ func NewValidator(operator sdk.AccAddress, pubKey cryptotypes.PubKey) (Validator return Validator{ OperatorEarningsAddr: operator.String(), + OperatorApproveAddr: operator.String(), ConsensusPubkey: pkAny, Jailed: false, Status: stakingtypes.Bonded, diff --git a/x/oracle/genesis.go b/x/oracle/genesis.go index 3dc8994e9..6461bf7cc 100644 --- a/x/oracle/genesis.go +++ b/x/oracle/genesis.go @@ -14,7 +14,7 @@ func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) } // Set if defined if genState.ValidatorUpdateBlock != nil { - k.SetValidatorUpdateBlock(ctx, *genState.ValidatorUpdateBlock) + k.SetValidatorUpdateForCache(ctx, *genState.ValidatorUpdateBlock) } // Set if defined if genState.IndexRecentParams != nil { @@ -39,6 +39,7 @@ func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) // Set all stakerInfos for assetIDs for _, elem := range genState.StakerInfosAssets { k.SetStakerInfos(ctx, elem.AssetId, elem.StakerInfos) + k.SetNSTVersion(ctx, elem.AssetId, elem.NstVersion) } // set validatorReportInfos for _, elem := range genState.ValidatorReportInfos { diff --git a/x/oracle/keeper/aggregator/aggregator.go b/x/oracle/keeper/aggregator/aggregator.go deleted file mode 100644 index 1d21ceb00..000000000 --- a/x/oracle/keeper/aggregator/aggregator.go +++ /dev/null @@ -1,240 +0,0 @@ -package aggregator - -import ( - "math/big" - "sort" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" -) - -type priceWithTimeAndRound struct { - price string - decimal int32 - timestamp string - detRoundID string // roundId from source if exists -} - -type reportPrice struct { - validator string - // final price, set to -1 as initial - price string - // sourceId->priceWithTimeAndRound - prices map[uint64]*priceWithTimeAndRound - power *big.Int -} - -func (r *reportPrice) aggregate() string { - if len(r.price) > 0 { - return r.price - } - tmp := make([]*big.Int, 0, len(r.prices)) - for _, p := range r.prices { - priceInt, ok := new(big.Int).SetString(p.price, 10) - // price is not a number (NST), we will just return instead of calculation - if !ok { - 
return p.price - } - tmp = append(tmp, priceInt) - } - r.price = common.BigIntList(tmp).Median().String() - return r.price -} - -type aggregator struct { - finalPrice string - reports []*reportPrice - // total valiadtor power who has submitted price - reportPower *big.Int - totalPower *big.Int - // validator set total power - // totalPower string - // sourceId->roundId used to track the confirmed DS roundId - // updated by calculator, detId use string - dsPrices map[uint64]string -} - -func (agg *aggregator) copy4CheckTx() *aggregator { - ret := &aggregator{ - finalPrice: agg.finalPrice, - reportPower: copyBigInt(agg.reportPower), - totalPower: copyBigInt(agg.totalPower), - - reports: make([]*reportPrice, 0, len(agg.reports)), - dsPrices: make(map[uint64]string), - } - for k, v := range agg.dsPrices { - ret.dsPrices[k] = v - } - for _, report := range agg.reports { - rTmp := *report - rTmp.price = report.price - rTmp.power = copyBigInt(report.power) - - for k, v := range report.prices { - // prices are information submitted by validators, these data will not change under deterministic sources, but with non-deterministic sources they might be overwrite by later prices - tmpV := *v - tmpV.price = v.price - rTmp.prices[k] = &tmpV - } - - ret.reports = append(ret.reports, &rTmp) - } - - return ret -} - -// fill price from validator submitting into aggregator, and calculation the voting power and check with the consensus status of deterministic source value to decide when to do the aggregation -// TODO: currently apply mode=1 in V1, add swith modes -func (agg *aggregator) fillPrice(pSources []*types.PriceSource, validator string, power *big.Int) { - report := agg.getReport(validator) - if report == nil { - report = &reportPrice{ - validator: validator, - prices: make(map[uint64]*priceWithTimeAndRound), - power: power, - } - agg.reports = append(agg.reports, report) - agg.reportPower = new(big.Int).Add(agg.reportPower, power) - } - - for _, pSource := range pSources { - if len(pSource.Prices[0].DetID) == 0 { - // this is an NS price report, price will just be updated instead of append - if pTR := report.prices[pSource.SourceID]; pTR == nil { - pTmp := pSource.Prices[0] - pTR = &priceWithTimeAndRound{ - price: pTmp.Price, - decimal: pTmp.Decimal, - timestamp: pTmp.Timestamp, - } - report.prices[pSource.SourceID] = pTR - } else { - pTR.price = pSource.Prices[0].Price - } - } else { - // this is an DS price report - if pTR := report.prices[pSource.SourceID]; pTR == nil { - pTmp := pSource.Prices[0] - pTR = &priceWithTimeAndRound{ - decimal: pTmp.Decimal, - } - if len(agg.dsPrices[pSource.SourceID]) > 0 { - for _, reportTmp := range agg.reports { - if priceTmp := reportTmp.prices[pSource.SourceID]; priceTmp != nil && len(priceTmp.price) > 0 { - pTR.price = priceTmp.price - pTR.detRoundID = priceTmp.detRoundID - pTR.timestamp = priceTmp.timestamp - break - } - } - } - report.prices[pSource.SourceID] = pTR - } - // skip if this DS's slot exists, DS's value only updated by calculator - } - } -} - -// TODO: for v1 use mode=1, which means agg.dsPrices with each key only be updated once, switch modes -func (agg *aggregator) confirmDSPrice(confirmedRounds []*confirmedPrice) { - for _, priceSourceRound := range confirmedRounds { - // update the latest round-detId for DS, TODO: in v1 we only update this value once since calculator will just ignore any further value once a detId has reached consensus - // agg.dsPrices[priceSourceRound.sourceId] = priceSourceRound.detId - // this id's comparison need to format 
id to make sure them be the same length - if id := agg.dsPrices[priceSourceRound.sourceID]; len(id) == 0 || (len(id) > 0 && id < priceSourceRound.detID) { - agg.dsPrices[priceSourceRound.sourceID] = priceSourceRound.detID - for _, report := range agg.reports { - if len(report.price) > 0 { - // price of IVA has completed - continue - } - if price := report.prices[priceSourceRound.sourceID]; price != nil { - price.detRoundID = priceSourceRound.detID - price.timestamp = priceSourceRound.timestamp - price.price = priceSourceRound.price - } // else TODO: panic in V1 - } - } - } -} - -func (agg *aggregator) getReport(validator string) *reportPrice { - for _, r := range agg.reports { - if r.validator == validator { - return r - } - } - return nil -} - -func (agg *aggregator) aggregate() string { - if len(agg.finalPrice) > 0 { - return agg.finalPrice - } - // TODO: implemetn different MODE for definition of consensus, - // currently: use rule_1+MODE_1: {rule:specified source:`chainlink`, MODE: asap when power exceeds the threshold} - // 1. check OVA threshold - // 2. check IVA consensus with rule, TODO: for v1 we only implement with mode=1&rule=1 - if common.ExceedsThreshold(agg.reportPower, agg.totalPower) { - // TODO: this is kind of a mock way to suite V1, need update to check with params.rule - // check if IVA all reached consensus - if len(agg.dsPrices) > 0 { - validatorPrices := make([]*big.Int, 0, len(agg.reports)) - // do the aggregation to find out the 'final price' - for _, validatorReport := range agg.reports { - priceInt, ok := new(big.Int).SetString(validatorReport.aggregate(), 10) - if !ok { - // price is not number, we just return the price when power exceeds threshold - agg.finalPrice = validatorReport.aggregate() - return agg.finalPrice - } - validatorPrices = append(validatorPrices, priceInt) - } - // vTmp := bigIntList(validatorPrices) - agg.finalPrice = common.BigIntList(validatorPrices).Median().String() - // clear relative aggregator for this feeder, all the aggregator,calculator, filter can be removed since this round has been sealed - } - } - return agg.finalPrice -} - -// TODO: this only suites for DS. 
check source type for extension -// GetFinaPriceListForFeederIDs retrieve final price info as an array ordered by sourceID asc -func (agg *aggregator) getFinalPriceList(feederID uint64) []*types.AggFinalPrice { - sourceIDs := make([]uint64, 0, len(agg.dsPrices)) - for sID := range agg.dsPrices { - sourceIDs = append(sourceIDs, sID) - } - sort.Slice(sourceIDs, func(i, j int) bool { - return sourceIDs[i] < sourceIDs[j] - }) - ret := make([]*types.AggFinalPrice, 0, len(sourceIDs)) - for _, sID := range sourceIDs { - for _, report := range agg.reports { - price := report.prices[sID] - if price == nil || price.detRoundID != agg.dsPrices[sID] { - // the DetID mismatch should not happen - continue - } - ret = append(ret, &types.AggFinalPrice{ - FeederID: feederID, - SourceID: sID, - DetID: price.detRoundID, - Price: price.price, - }) - // {feederID, sourceID} has been found, skip rest reports - break - } - } - return ret -} - -func newAggregator(validatorSetLength int, totalPower *big.Int) *aggregator { - return &aggregator{ - reports: make([]*reportPrice, 0, validatorSetLength), - reportPower: big.NewInt(0), - dsPrices: make(map[uint64]string), - totalPower: totalPower, - } -} diff --git a/x/oracle/keeper/aggregator/aggregator_test.go b/x/oracle/keeper/aggregator/aggregator_test.go deleted file mode 100644 index 35adf0aa1..000000000 --- a/x/oracle/keeper/aggregator/aggregator_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package aggregator - -import ( - "math/big" - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -func TestAggregator(t *testing.T) { - Convey("fill prices into aggregator", t, func() { - a := newAggregator(5, big.NewInt(4)) - // a.fillPrice(pS1, "v1", one) //v1:{1, 2} - - Convey("fill v1's report", func() { - a.fillPrice(pS1, "v1", one) // v1:{1, 2} - report := a.getReport("v1") - So(report.prices[1].price, ShouldEqual, "") - Convey("fill v2's report", func() { - a.fillPrice(pS2, "v2", one) - report := a.getReport("v2") - So(report.prices[1].price, ShouldEqual, "") - Convey("fill more v1's report", func() { - a.fillPrice(pS21, "v1", one) - report := a.getReport("v1") - So(report.prices[1].price, ShouldEqual, "") - So(report.prices[2].price, ShouldEqual, "") - Convey("confirm deterministic source_1 and source 2", func() { - a.confirmDSPrice([]*confirmedPrice{ - { - sourceID: 1, - detID: "9", - price: "10", - timestamp: "-", - }, - { - sourceID: 2, - detID: "3", - price: "20", - timestamp: "-", - }, - }) - reportV1 := a.getReport("v1") - reportV2 := a.getReport("v2") - So(reportV1.prices[1].price, ShouldResemble, "10") - So(reportV1.prices[1].detRoundID, ShouldEqual, "9") - - So(reportV2.prices[1].price, ShouldResemble, "10") - So(reportV2.prices[1].detRoundID, ShouldEqual, "9") - - So(reportV1.prices[2].price, ShouldResemble, "20") - So(reportV1.prices[2].detRoundID, ShouldEqual, "3") - - // current implementation only support v1's single source - Convey("aggregate after all source confirmed", func() { - a.fillPrice(pS6, "v3", one) - a.aggregate() // v1:{s1:9-10, s2:3-20}:15, v2:{s1:9-10}:10 - So(a.getReport("v1").price, ShouldEqual, "15") - So(a.getReport("v2").price, ShouldEqual, "10") - So(a.getReport("v3").price, ShouldEqual, "20") - So(a.finalPrice, ShouldEqual, "15") - }) - }) - }) - }) - }) - }) -} diff --git a/x/oracle/keeper/aggregator/calculator.go b/x/oracle/keeper/aggregator/calculator.go deleted file mode 100644 index 5e1b8fd5c..000000000 --- a/x/oracle/keeper/aggregator/calculator.go +++ /dev/null @@ -1,197 +0,0 @@ -package aggregator - -import ( - 
"math/big" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" -) - -type confirmedPrice struct { - sourceID uint64 - detID string - price string - timestamp string -} - -// internal struct -type priceAndPower struct { - price string - power *big.Int -} - -// for a specific DS round, it could have multiple values provided by different validators(should not be true if there's no malicious validator) -type roundPrices struct { // 0 means NS - detID string - prices []*priceAndPower - price string - timestamp string - // confirmed bool -} - -// udpate priceAndPower for a specific DSRoundID, if the price exists, increase its power with provided data -// return confirmed=true, when detect power exceeds the threshold -func (r *roundPrices) updatePriceAndPower(pw *priceAndPower, totalPower *big.Int) (updated bool, confirmed bool) { - if len(r.price) > 0 { - confirmed = true - return - } - for _, item := range r.prices { - if item.price == pw.price { - item.power = new(big.Int).Add(item.power, pw.power) - updated = true - if common.ExceedsThreshold(item.power, totalPower) { - r.price = item.price - confirmed = true - } - return - } - } - if len(r.prices) < cap(r.prices) { - r.prices = append(r.prices, pw) - updated = true - if common.ExceedsThreshold(pw.power, totalPower) { - r.price = pw.price - confirmed = true - } - } - return -} - -// each DS corresponding a roundPriceList to represent its multiple rounds(DS round) in one oracle-round -type roundPricesList struct { - roundPricesList []*roundPrices - // each round can have at most roundPricesCount priceAndPower - roundPricesCount int -} - -func (r *roundPricesList) copy4CheckTx() *roundPricesList { - ret := &roundPricesList{ - roundPricesList: make([]*roundPrices, 0, len(r.roundPricesList)), - roundPricesCount: r.roundPricesCount, - } - - for _, v := range r.roundPricesList { - tmpRP := &roundPrices{ - detID: v.detID, - price: v.price, - prices: make([]*priceAndPower, 0, len(v.prices)), - timestamp: v.timestamp, - } - for _, pNP := range v.prices { - tmpPNP := *pNP - // power will be modified during execution - tmpPNP.power = copyBigInt(pNP.power) - tmpRP.prices = append(tmpRP.prices, &tmpPNP) - } - - ret.roundPricesList = append(ret.roundPricesList, tmpRP) - } - return ret -} - -// to tell if any round of this DS has reached consensus/confirmed -func (r *roundPricesList) hasConfirmedDetID() bool { - for _, round := range r.roundPricesList { - if len(round.price) > 0 { - return true - } - } - return false -} - -// get the roundPriceList correspond to specifid detID of a DS -// if no required data and the pricesList has not reach its limitation, we will add a new slot for this detId -func (r *roundPricesList) getOrNewRound(detID string, timestamp string) (round *roundPrices) { - for _, round = range r.roundPricesList { - if round.detID == detID { - if len(round.price) > 0 { - round = nil - } - return - } - } - - if len(r.roundPricesList) < cap(r.roundPricesList) { - round = &roundPrices{ - detID: detID, - prices: make([]*priceAndPower, 0, r.roundPricesCount), - timestamp: timestamp, - } - r.roundPricesList = append(r.roundPricesList, round) - return - } - return -} - -// calculator used to get consensus on deterministic source based data from validator set reports of price -type calculator struct { - // sourceId->{[]{roundId, []{price,power}, confirmed}}, confirmed value will be set in [0] - deterministicSource map[uint64]*roundPricesList - validatorLength int - totalPower *big.Int 
-} - -func (c *calculator) copy4CheckTx() *calculator { - ret := newCalculator(c.validatorLength, c.totalPower) - - // copy deterministicSource - for k, v := range c.deterministicSource { - ret.deterministicSource[k] = v.copy4CheckTx() - } - - return ret -} - -func (c *calculator) newRoundPricesList() *roundPricesList { - return &roundPricesList{ - roundPricesList: make([]*roundPrices, 0, int(common.MaxDetID)*c.validatorLength), - // for each DS-roundId, the count of prices provided is the number of validators at most - roundPricesCount: c.validatorLength, - } -} - -func (c *calculator) getOrNewSourceID(sourceID uint64) *roundPricesList { - rounds := c.deterministicSource[sourceID] - if rounds == nil { - rounds = c.newRoundPricesList() - c.deterministicSource[sourceID] = rounds - } - return rounds -} - -// fillPrice called upon new MsgCreatPrice arrived, to trigger the calculation to get to consensus on the same roundID_of_deterministic_source -// v1 use mode1, TODO: switch modes -func (c *calculator) fillPrice(pSources []*types.PriceSource, _ string, power *big.Int) (confirmedRounds []*confirmedPrice) { - for _, pSource := range pSources { - rounds := c.getOrNewSourceID(pSource.SourceID) - if rounds.hasConfirmedDetID() { - // TODO: this skip is just for V1 to do fast calculation and release EndBlocker pressure, may lead to 'not latest detId' be chosen - break - } - for _, pDetID := range pSource.Prices { - round := rounds.getOrNewRound(pDetID.DetID, pDetID.Timestamp) - if round == nil { - // this sourceId has reach the limitation of different detId, or has confirmed - continue - } - - updated, confirmed := round.updatePriceAndPower(&priceAndPower{pDetID.Price, power}, c.totalPower) - if updated && confirmed { - // sourceId, detId, price - confirmedRounds = append(confirmedRounds, &confirmedPrice{pSource.SourceID, round.detID, round.price, round.timestamp}) // TODO: just in v1 with mode==1, we use asap, so we just ignore any further data from this DS, even higher detId may get to consensus, in this way, in most case, we can complete the calculation in the transaction execution process. Release the pressure in EndBlocker - // TODO: this may delay to current block finish - break - } - } - } - return -} - -func newCalculator(validatorSetLength int, totalPower *big.Int) *calculator { - return &calculator{ - deterministicSource: make(map[uint64]*roundPricesList), - validatorLength: validatorSetLength, - totalPower: totalPower, - } -} diff --git a/x/oracle/keeper/aggregator/calculator_test.go b/x/oracle/keeper/aggregator/calculator_test.go deleted file mode 100644 index e5b85a46e..000000000 --- a/x/oracle/keeper/aggregator/calculator_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package aggregator - -import ( - "math/big" - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -/* - 1-10, 2-12, 3-15 - -ps1: 1-10, 2-12 -ps2: 2-12, 3-15 -ps3: 1-10, 2-11(m) ---- -ps4: 2-12, 3-19(m) -ps5: 1-10, 3-19(m) ----- -ps1, ps2, ps3, ps4 ---> 2-12 -ps2, ps2, ps3, ps5 ---> 1-10 -*/ -func TestCalculator(t *testing.T) { - one := big.NewInt(1) - Convey("fill prices into calculator", t, func() { - c := newCalculator(5, big.NewInt(4)) - Convey("fill prices from single deterministic source", func() { - c.fillPrice(pS1, "v1", one) // 1-10, 2-12 - c.fillPrice(pS2, "v2", one) // 2-12, 3-15 - c.fillPrice(pS3, "v3", one) // 1-10, 2-11 - Convey("consensus on detid=2 and price=12", func() { - confirmed := c.fillPrice(pS4, "v4", one) // 2-12, 3-19 - So(confirmed[0].detID, ShouldEqual, "2") - So(confirmed[0].price, ShouldResemble, "12") - }) - Convey("consensus on detid=1 and price=10", func() { - confirmed := c.fillPrice(pS5, "v5", one) // 1-10, 3-19 - So(confirmed[0].detID, ShouldEqual, "1") - So(confirmed[0].price, ShouldResemble, "10") - - confirmed = c.fillPrice(pS4, "v4", one) - So(confirmed, ShouldBeNil) - }) - }) - Convey("fill prices from multiple deterministic sources", func() { - c.fillPrice(pS21, "v1", one) - c.fillPrice(pS22, "v2", one) - c.fillPrice(pS23, "v3", one) - Convey("consensus on both source 1 and source 2", func() { - confirmed := c.fillPrice(pS24, "v4", one) - So(len(confirmed), ShouldEqual, 2) - i := 0 - if confirmed[0].sourceID == 2 { - i = 1 - } - So(confirmed[i].detID, ShouldEqual, "2") - So(confirmed[i].price, ShouldResemble, "12") - - So(confirmed[1-i].detID, ShouldEqual, "3") - So(confirmed[1-i].price, ShouldResemble, "15") - }) - Convey("consenus on source 1 only", func() { - confirmed := c.fillPrice(pS25, "v5", one) - So(len(confirmed), ShouldEqual, 1) - So(confirmed[0].detID, ShouldEqual, "1") - So(confirmed[0].price, ShouldResemble, "10") - }) - }) - }) -} diff --git a/x/oracle/keeper/aggregator/context.go b/x/oracle/keeper/aggregator/context.go deleted file mode 100644 index ffa3cedda..000000000 --- a/x/oracle/keeper/aggregator/context.go +++ /dev/null @@ -1,450 +0,0 @@ -package aggregator - -import ( - "errors" - "fmt" - "math/big" - "sort" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type PriceItemKV struct { - TokenID uint64 - PriceTR types.PriceTimeRound -} - -type roundInfo struct { - // this round of price will start from block basedBlock+1, the basedBlock served as a trigger to notify validators to submit prices - basedBlock uint64 - // next round id of the price oracle service, price with the id will be record on block basedBlock+1 if all prices submitted by validators(for v1, validators serve as oracle nodes) get to consensus immediately - nextRoundID uint64 - // indicate if this round is open for collecting prices or closed in either condition that success with a consensused price or not - // 1: open, 2: closed - status roundStatus -} - -// roundStatus is an enum type to indicate the status of a roundInfo -type roundStatus int32 - -const ( - // roundStatusOpen indicates the round is open for collecting prices - roundStatusOpen roundStatus = iota + 1 - // roundStatusClosed indicates the round is closed, either success with a consensused price or not - roundStatusClosed -) - -// AggregatorContext keeps memory cache for state params, validatorset, and updatedthese values as they updated on chain. 
And it keeps the information to track all tokenFeeders' status and data collection -// nolint -type AggregatorContext struct { - params *types.Params - - // validator->power - validatorsPower map[string]*big.Int - totalPower *big.Int - - // each active feederToken has a roundInfo - rounds map[uint64]*roundInfo - - // each roundInfo has a worker - aggregators map[uint64]*worker -} - -func (agc *AggregatorContext) Copy4CheckTx() *AggregatorContext { - ret := &AggregatorContext{ - // params, validatorsPower, totalPower, these values won't change during block executing - params: agc.params, - validatorsPower: agc.validatorsPower, - totalPower: agc.totalPower, - - rounds: make(map[uint64]*roundInfo), - aggregators: make(map[uint64]*worker), - } - - for k, v := range agc.rounds { - vTmp := *v - ret.rounds[k] = &vTmp - } - - for k, v := range agc.aggregators { - w := newWorker(k, ret) - w.sealed = v.sealed - w.price = v.price - - w.f = v.f.copy4CheckTx() - w.c = v.c.copy4CheckTx() - w.a = v.a.copy4CheckTx() - } - - return ret -} - -// sanity check for the msgCreatePrice -func (agc *AggregatorContext) sanityCheck(msg *types.MsgCreatePrice) error { - // sanity check - // TODO: check the msgCreatePrice's Decimal is correct with params setting - // TODO: check len(price.prices)>0, len(price.prices._range_eachPriceSource.Prices)>0, at least has one source, and for each source has at least one price - - if accAddress, err := sdk.AccAddressFromBech32(msg.Creator); err != nil { - return errors.New("invalid address") - } else if _, ok := agc.validatorsPower[sdk.ConsAddress(accAddress).String()]; !ok { - return errors.New("signer is not validator") - } - - if len(msg.Prices) == 0 { - return errors.New("msg should provide at least one price") - } - - for _, pSource := range msg.Prices { - if len(pSource.Prices) == 0 || len(pSource.Prices) > int(common.MaxDetID) || !agc.params.IsValidSource(pSource.SourceID) { - return errors.New("source should be valid and provide at least one price") - } - // check with params is coressponding source is deteministic - if agc.params.IsDeterministicSource(pSource.SourceID) { - for _, pDetID := range pSource.Prices { - // TODO: verify the format of DetId is correct, since this is string, and we will make consensus with validator's power, so it's ok not to verify the format - // just make sure the DetId won't mess up with NS's placeholder id, the limitation of maximum count one validator can submit will be check by filter - if len(pDetID.DetID) == 0 { - // deterministic must have specified deterministicId - return errors.New("ds should have roundid") - } - // DS's price value will go through consensus process, so it's safe to skip the check here - } - // sanity check: NS submit only one price with detId=="" - } else if len(pSource.Prices) > 1 || len(pSource.Prices[0].DetID) > 0 { - return errors.New("ns should not have roundid") - } - } - return nil -} - -func (agc *AggregatorContext) checkMsg(msg *types.MsgCreatePrice, isCheckMode bool) error { - if err := agc.sanityCheck(msg); err != nil { - return err - } - - // check feeder is active - feederContext := agc.rounds[msg.FeederID] - if feederContext == nil { - return fmt.Errorf("context not exist for feederID:%d", msg.FeederID) - } - // This round had been sealed but current window not closed - if feederContext.status != roundStatusOpen { - if feederWorker := agc.aggregators[msg.FeederID]; feederWorker != nil { - if _, list4Aggregator := feederWorker.filtrate(msg); list4Aggregator != nil { - // record this message for 
performance evaluation(used for slashing) - feederWorker.recordMessage(msg.Creator, msg.FeederID, list4Aggregator) - } - } - // if the validator send a tx inside an alive window but the status had been changed to closed by enough power collected - // we should ignore the error for simulation to complete - if !isCheckMode { - return fmt.Errorf("context is not available for feederID:%d", msg.FeederID) - } - } - - // senity check on basedBlock - if msg.BasedBlock != feederContext.basedBlock { - return errors.New("baseblock not match") - } - - // check sources rule matches - if ok, err := agc.params.CheckRules(msg.FeederID, msg.Prices); !ok { - return err - } - - for _, pSource := range msg.Prices { - for _, pTimeDetID := range pSource.Prices { - if ok := agc.params.CheckDecimal(msg.FeederID, pTimeDetID.Decimal); !ok { - return fmt.Errorf("decimal not match for source ID %d and price ID %s", pSource.SourceID, pTimeDetID.DetID) - } - } - } - return nil -} - -func (agc *AggregatorContext) FillPrice(msg *types.MsgCreatePrice) (*PriceItemKV, *cache.ItemM, error) { - feederWorker := agc.aggregators[msg.FeederID] - // worker initialzed here reduce workload for Endblocker - if feederWorker == nil { - feederWorker = newWorker(msg.FeederID, agc) - agc.aggregators[msg.FeederID] = feederWorker - } - - if feederWorker.sealed { - if _, list4Aggregator := feederWorker.filtrate(msg); list4Aggregator != nil { - // record this message for performance evaluation(used for slashing) - feederWorker.recordMessage(msg.Creator, msg.FeederID, list4Aggregator) - } - return nil, nil, types.ErrPriceProposalIgnored.Wrap("price aggregation for this round has sealed") - } - - if listFilled := feederWorker.do(msg); listFilled != nil { - feederWorker.recordMessage(msg.Creator, msg.FeederID, listFilled) - if finalPrice := feederWorker.aggregate(); len(finalPrice) > 0 { - agc.rounds[msg.FeederID].status = roundStatusClosed - feederWorker.seal() - return &PriceItemKV{agc.params.GetTokenFeeder(msg.FeederID).TokenID, types.PriceTimeRound{ - Price: finalPrice, - Decimal: agc.params.GetTokenInfo(msg.FeederID).Decimal, - // TODO: check the format - Timestamp: msg.Prices[0].Prices[0].Timestamp, - RoundID: agc.rounds[msg.FeederID].nextRoundID, - }}, &cache.ItemM{FeederID: msg.FeederID}, nil - } - return nil, &cache.ItemM{FeederID: msg.FeederID, PSources: listFilled, Validator: msg.Creator}, nil - } - - // return nil, nil, errors.New("no valid price proposal to add for aggregation") - return nil, nil, types.ErrPriceProposalIgnored -} - -// NewCreatePrice receives msgCreatePrice message, and goes process: filter->aggregator, filter->calculator->aggregator -// non-deterministic data will goes directly into aggregator, and deterministic data will goes into calculator first to get consensus on the deterministic id. 
-func (agc *AggregatorContext) NewCreatePrice(ctx sdk.Context, msg *types.MsgCreatePrice) (*PriceItemKV, *cache.ItemM, error) { - if err := agc.checkMsg(msg, ctx.IsCheckTx()); err != nil { - return nil, nil, types.ErrInvalidMsg.Wrap(err.Error()) - } - return agc.FillPrice(msg) -} - -// prepare for new roundInfo, just update the status kept in memory -// executed at EndBlock stage, seall all success or expired roundInfo -// including possible aggregation and state update -// when validatorSet update, set force to true, to seal all alive round -// returns: 1st successful sealed, need to be written to KVStore, 2nd: failed sealed tokenID, use previous price to write to KVStore -func (agc *AggregatorContext) SealRound(ctx sdk.Context, force bool) (success []*PriceItemKV, failed []uint64, sealed []uint64, windowClosed []uint64) { - logger := ctx.Logger() - feederIDs := make([]uint64, 0, len(agc.rounds)) - for fID := range agc.rounds { - feederIDs = append(feederIDs, fID) - } - sort.Slice(feederIDs, func(i, j int) bool { - return feederIDs[i] < feederIDs[j] - }) - height := uint64(ctx.BlockHeight()) - // make sure feederIDs are accessed in order to calculate the indexOffset for slashing - windowClosedMap := make(map[uint64]bool) - for _, feederID := range feederIDs { - if agc.windowEnd(feederID, height) { - windowClosed = append(windowClosed, feederID) - windowClosedMap[feederID] = true - } - round := agc.rounds[feederID] - if round.status == roundStatusOpen { - feeder := agc.params.GetTokenFeeder(feederID) - // TODO: for mode=1, we don't do aggregate() here, since if it donesn't success in the transaction execution stage, it won't success here - // but it's not always the same for other modes, switch modes - switch common.Mode { - case types.ConsensusModeASAP: - offset := height - round.basedBlock - expired := feeder.EndBlock > 0 && height >= feeder.EndBlock - outOfWindow := offset >= uint64(common.MaxNonce) - - // an open round reach its end of window, increase offsetIndex for active valdiator and chech the performance(missing/malicious) - - if expired || outOfWindow || force { - failed = append(failed, feeder.TokenID) - if !expired { - logger.Debug("set round status from open to closed", "feederID", feederID, "force", force, "block", height) - round.status = roundStatusClosed - } - // TODO: optimize operformance - sealed = append(sealed, feederID) - if !windowClosedMap[feederID] { - logger.Debug("remove aggregators(workers) force/expired", "feederID", feederID) - agc.RemoveWorker(feederID) - } - } - default: - logger.Info("mode other than 1 is not support now") - } - } - // all status: 1->2, remove its aggregator - if agc.aggregators[feederID] != nil && agc.aggregators[feederID].sealed { - sealed = append(sealed, feederID) - } - } - return success, failed, sealed, windowClosed -} - -// PrepareEndBlock is called at EndBlock stage, to prepare the roundInfo for the next block(of input block) -func (agc *AggregatorContext) PrepareRoundEndBlock(ctx sdk.Context, block int64, forceSealHeight uint64) (newRoundFeederIDs []uint64) { - if block < 1 { - return newRoundFeederIDs - } - logger := ctx.Logger() - blockUint64 := uint64(block) - - for feederID, feeder := range agc.params.GetTokenFeeders() { - if feederID == 0 { - continue - } - if (feeder.EndBlock > 0 && feeder.EndBlock <= blockUint64) || feeder.StartBaseBlock > blockUint64 { - // this feeder is inactive - continue - } - - delta := blockUint64 - feeder.StartBaseBlock - left := delta % feeder.Interval - count := delta / feeder.Interval - 
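			// Worked example of this scheduling arithmetic (illustrative
			// numbers, not taken from the diff): with StartBaseBlock=10,
			// Interval=10, StartRoundID=1 and block=21, delta=11, left=1 and
			// count=1, which yields latestBasedblock=20 and
			// latestNextRoundID=2 just below; at block=20 itself left would
			// be 0, so a new round would be announced for this feeder.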
latestBasedblock := blockUint64 - left - latestNextRoundID := feeder.StartRoundID + count - - logger.Info("PrepareRoundEndBlock", "feederID", feederID, "block", block, "latestBasedblock", latestBasedblock, "forceSealHeight", forceSealHeight, "position_in_round", left) - - feederIDUint64 := uint64(feederID) - round := agc.rounds[feederIDUint64] - if round == nil { - logger.Info("PrepareRoundEndBlock: initialize round info") - round = &roundInfo{ - basedBlock: latestBasedblock, - nextRoundID: latestNextRoundID, - } - if left >= uint64(common.MaxNonce) { - // since do sealround properly before prepareRound, this only possible happens in node restart, and nonce has been taken care of in kvStore - round.status = roundStatusClosed - logger.Info("PrepareRoundEndBlock: status_closed") - } else { - round.status = roundStatusOpen - logger.Info("PrepareRoundEndBlock: status_open") - if latestBasedblock < forceSealHeight { - // debug - logger.Debug("PrepareRoundEndBlock: status_closed due to forceseal") - round.status = roundStatusClosed - } - if left == 0 { - logger.Info("PrepareRoundEndBlock: add a new round") - // set nonce for corresponding feederID for new roud start - newRoundFeederIDs = append(newRoundFeederIDs, feederIDUint64) - } - } - agc.rounds[feederIDUint64] = round - } else { - // prepare a new round for exist roundInfo - if left == 0 { - logger.Info("PrepareRoundEndBlock: set existing round status to open") - round.basedBlock = latestBasedblock - round.nextRoundID = latestNextRoundID - round.status = roundStatusOpen - // set nonce for corresponding feederID for new roud start - newRoundFeederIDs = append(newRoundFeederIDs, feederIDUint64) - // drop previous worker - agc.RemoveWorker(feederIDUint64) - } else if round.status == roundStatusOpen && left >= uint64(common.MaxNonce) { - logger.Info("PrepareRoundEndBlock: set existing round status to closed") - // this shouldn't happen, if do sealround properly before prepareRound, basically for test only - // TODO: print error log here - round.status = roundStatusClosed - // TODO: just modify the status here, since sealRound should do all the related seal actions already when parepare invoked - } - } - } - return newRoundFeederIDs -} - -// SetParams sets the params field of aggregatorContext“ -func (agc *AggregatorContext) SetParams(p *types.Params) { - agc.params = p -} - -// SetValidatorPowers sets the map of validator's power for aggreagtorContext -func (agc *AggregatorContext) SetValidatorPowers(vp map[string]*big.Int) { - // t := big.NewInt(0) - agc.totalPower = big.NewInt(0) - agc.validatorsPower = make(map[string]*big.Int) - for addr, power := range vp { - agc.validatorsPower[addr] = power - agc.totalPower = new(big.Int).Add(agc.totalPower, power) - } -} - -// GetValidatorPowers returns the map of validator's power stored in aggregatorContext -func (agc *AggregatorContext) GetValidatorPowers() (vp map[string]*big.Int) { - return agc.validatorsPower -} - -func (agc *AggregatorContext) GetValidators() (validators []string) { - for k := range agc.validatorsPower { - validators = append(validators, k) - } - return -} - -// GetTokenIDFromAssetID returns tokenID for corresponding tokenID, it returns 0 if agc.params is nil or assetID not found in agc.params -func (agc *AggregatorContext) GetTokenIDFromAssetID(assetID string) int { - if agc.params == nil { - return 0 - } - return agc.params.GetTokenIDFromAssetID(assetID) -} - -// GetParams returns the params field of aggregatorContext -func (agc *AggregatorContext) GetParams() types.Params { - 
return *agc.params -} - -func (agc *AggregatorContext) GetParamsMaxSizePrices() uint64 { - return uint64(agc.params.MaxSizePrices) -} - -// GetFinalPriceListForFeederIDs get final price list for required feederIDs in format []{feederID, sourceID, detID, price} with asc of {feederID, sourceID} -// feederIDs is required to be ordered asc -func (agc *AggregatorContext) GetFinalPriceListForFeederIDs(feederIDs []uint64) []*types.AggFinalPrice { - ret := make([]*types.AggFinalPrice, 0, len(feederIDs)) - for _, feederID := range feederIDs { - feederWorker := agc.aggregators[feederID] - if feederWorker != nil { - if pList := feederWorker.getFinalPriceList(feederID); len(pList) > 0 { - ret = append(ret, pList...) - } - } - } - return ret -} - -// PerformanceReview compare results to decide whether the validator is effective, honest -func (agc *AggregatorContext) PerformanceReview(ctx sdk.Context, finalPrice *types.AggFinalPrice, validator string) (exist, matched bool) { - feederWorker := agc.aggregators[finalPrice.FeederID] - if feederWorker == nil { - // Log unexpected nil feederWorker for debugging - ctx.Logger().Error( - "unexpected nil feederWorker in PerformanceReview", - "feederID", finalPrice.FeederID, - "validator", validator, - ) - // Treat validator as effective & honest to avoid unfair penalties - exist = true - matched = true - return - } - exist, matched = feederWorker.check(validator, finalPrice.FeederID, finalPrice.SourceID, finalPrice.Price, finalPrice.DetID) - return -} - -func (agc AggregatorContext) windowEnd(feederID, height uint64) bool { - feeder := agc.params.TokenFeeders[feederID] - if (feeder.EndBlock > 0 && feeder.EndBlock <= height) || feeder.StartBaseBlock > height { - return false - } - delta := height - feeder.StartBaseBlock - left := delta % feeder.Interval - return left == uint64(common.MaxNonce) -} - -func (agc *AggregatorContext) RemoveWorker(feederID uint64) { - delete(agc.aggregators, feederID) -} - -// NewAggregatorContext returns a new instance of AggregatorContext -func NewAggregatorContext() *AggregatorContext { - return &AggregatorContext{ - validatorsPower: make(map[string]*big.Int), - totalPower: big.NewInt(0), - rounds: make(map[uint64]*roundInfo), - aggregators: make(map[uint64]*worker), - } -} diff --git a/x/oracle/keeper/aggregator/context_test.go b/x/oracle/keeper/aggregator/context_test.go deleted file mode 100644 index ef97c5b73..000000000 --- a/x/oracle/keeper/aggregator/context_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package aggregator - -import ( - "math/big" - - . "github.com/agiledragon/gomonkey/v2" - sdk "github.com/cosmos/cosmos-sdk/types" - // . 
"github.com/smartystreets/goconvey/convey" -) - -// func TestAggregatorContext(t *testing.T) { -// Convey("init aggregatorContext with default params", t, func() { -// agc := initAggregatorContext4Test() -// var ctx sdk.Context -// Convey("prepare round to gengerate round info of feeders for next block", func() { -// Convey("pepare within the window", func() { -// p := patchBlockHeight(12) -// agc.PrepareRoundEndBlock(ctx, 11, 0) -// -// Convey("for empty round list", func() { -// So(*agc.rounds[1], ShouldResemble, roundInfo{10, 2, 1}) -// }) -// -// Convey("update already exist round info", func() { -// p.Reset() -// time.Sleep(1 * time.Second) -// patchBlockHeight(10 + int64(common.MaxNonce) + 1) -// -// agc.PrepareRoundEndBlock(ctx, 10+int64(common.MaxNonce), 0) -// So(agc.rounds[1].status, ShouldEqual, 2) -// }) -// p.Reset() -// time.Sleep(1 * time.Second) -// }) -// Convey("pepare outside the window", func() { -// Convey("for empty round list", func() { -// p := patchBlockHeight(10 + int64(common.MaxNonce) + 1) -// agc.PrepareRoundEndBlock(ctx, 10+int64(common.MaxNonce), 0) -// So(agc.rounds[1].status, ShouldEqual, 2) -// p.Reset() -// time.Sleep(1 * time.Second) -// }) -// }) -// }) -// -// Convey("seal existing round without any msg recieved", func() { -// p := patchBlockHeight(11) -// agc.PrepareRoundEndBlock(ctx, 10, 0) -// Convey("seal when exceed the window", func() { -// So(agc.rounds[1].status, ShouldEqual, 1) -// p.Reset() -// time.Sleep(1 * time.Second) -// patchBlockHeight(13) -// agc.SealRound(ctx, false) -// So(agc.rounds[1].status, ShouldEqual, 2) -// }) -// -// Convey("force seal by required", func() { -// p.Reset() -// time.Sleep(1 * time.Second) -// patchBlockHeight(12) -// agc.SealRound(ctx, false) -// So(agc.rounds[1].status, ShouldEqual, 1) -// agc.SealRound(ctx, true) -// So(agc.rounds[1].status, ShouldEqual, 2) -// }) -// p.Reset() -// time.Sleep(1 * time.Second) -// }) -// }) -// } - -func initAggregatorContext4Test() *AggregatorContext { - agc := NewAggregatorContext() - - validatorPowers := map[string]*big.Int{ - "v1": big.NewInt(1), - "v2": big.NewInt(1), - "v3": big.NewInt(1), - } - - p := defaultParams - - agc.SetValidatorPowers(validatorPowers) - agc.SetParams(&p) - return agc -} - -func patchBlockHeight(h int64) *Patches { - return ApplyMethod(sdk.Context{}, "BlockHeight", func(sdk.Context) int64 { - return h - }) -} diff --git a/x/oracle/keeper/aggregator/filter.go b/x/oracle/keeper/aggregator/filter.go deleted file mode 100644 index a179373a3..000000000 --- a/x/oracle/keeper/aggregator/filter.go +++ /dev/null @@ -1,102 +0,0 @@ -package aggregator - -import ( - "strconv" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" -) - -type filter struct { - maxNonce int - maxDetID int - // nonce start from 1 - validatorNonce map[string]*common.Set[int32] - // validator_sourceId -> roundID, NS use 0 - validatorSource map[string]*common.Set[string] -} - -func newFilter(maxNonce, maxDetID int) *filter { - return &filter{ - maxNonce: maxNonce, - maxDetID: maxDetID, - validatorNonce: make(map[string]*common.Set[int32]), - validatorSource: make(map[string]*common.Set[string]), - } -} - -func (f *filter) copy4CheckTx() *filter { - ret := *f - ret.validatorNonce = make(map[string]*common.Set[int32], len(f.validatorNonce)) - ret.validatorSource = make(map[string]*common.Set[string], len(f.validatorSource)) - - for k, v := range f.validatorNonce { - ret.validatorNonce[k] = v.Copy() - } - - for k, v := range 
f.validatorSource { - ret.validatorSource[k] = v.Copy() - } - - return &ret -} - -func (f *filter) newVNSet() *common.Set[int32] { - return common.NewSet[int32](f.maxNonce) -} - -func (f *filter) newVSSet() *common.Set[string] { - return common.NewSet[string](f.maxDetID) -} - -// add priceWithSource into calculator list and aggregator list depends on the source type(deterministic/non-deterministic) -func (f *filter) addPSource(pSources []*types.PriceSource, validator string) (list4Calculator []*types.PriceSource, list4Aggregator []*types.PriceSource) { - for _, pSource := range pSources { - // check conflicts or duplicate data for the same roundID within the same source - if len(pSource.Prices[0].DetID) > 0 { - // #nosec G115 - k := validator + strconv.Itoa(int(pSource.SourceID)) - detIDs := f.validatorSource[k] - if detIDs == nil { - detIDs = f.newVSSet() - f.validatorSource[k] = detIDs - } - - pSourceTmp := &types.PriceSource{ - SourceID: pSource.SourceID, - Prices: make([]*types.PriceTimeDetID, 0, len(pSource.Prices)), - Desc: pSource.Desc, - } - - for _, pDetID := range pSource.Prices { - if ok := detIDs.Add(pDetID.DetID); ok { - // deterministic id has not seen in filter and limitation of ids this souce has not reached - pSourceTmp.Prices = append(pSourceTmp.Prices, pDetID) - } - } - if len(pSourceTmp.Prices) > 0 { - list4Calculator = append(list4Calculator, pSourceTmp) - list4Aggregator = append(list4Aggregator, pSourceTmp) - } - } else { - // add non-deterministic pSource value into aggregator list - list4Aggregator = append(list4Aggregator, pSource) - } - } - return list4Calculator, list4Aggregator -} - -// filtrate checks data from MsgCreatePrice, and will drop the conflict or duplicate data, it will then fill data into calculator(for deterministic source data to get to consensus) and aggregator (for both deterministic and non0-deterministic source data run 2-layers aggregation to get the final price) -func (f *filter) filtrate(price *types.MsgCreatePrice) (list4Calculator []*types.PriceSource, list4Aggregator []*types.PriceSource) { - validator := price.Creator - nonces := f.validatorNonce[validator] - if nonces == nil { - nonces = f.newVNSet() - f.validatorNonce[validator] = nonces - } - - if ok := nonces.Add(price.Nonce); ok { - list4Calculator, list4Aggregator = f.addPSource(price.Prices, validator) - } - return -} diff --git a/x/oracle/keeper/aggregator/filter_test.go b/x/oracle/keeper/aggregator/filter_test.go deleted file mode 100644 index 085251f9b..000000000 --- a/x/oracle/keeper/aggregator/filter_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package aggregator - -import ( - "testing" - - "github.com/ExocoreNetwork/exocore/x/oracle/types" - . 
"github.com/smartystreets/goconvey/convey" -) - -func TestFilter(t *testing.T) { - Convey("test aggregator_filter", t, func() { - f := newFilter(3, 5) - ptd1 := newPTD("1", "600000") - ptd2 := newPTD("2", "600050") - ptd3 := newPTD("3", "600070") - ptd4 := newPTD("4", "601000") - ptd5 := newPTD("5", "602000") - ptd6 := newPTD("6", "603000") - - ps1 := &types.PriceSource{ - SourceID: 1, - Prices: []*types.PriceTimeDetID{ - ptd1, - ptd2, - }, - } - - ps := []*types.PriceSource{ps1} - msg := &types.MsgCreatePrice{ - Creator: "v1", - FeederID: 1, - Prices: ps, - BasedBlock: 10, - Nonce: 1, - } - l4c, l4a := f.filtrate(msg) - - Convey("add first valid msg", func() { - So(l4c, ShouldResemble, ps) - So(l4a, ShouldResemble, ps) - }) - - Convey("add duplicate nonce msg", func() { - ps1.Prices[0] = ptd3 - l4c, l4a = f.filtrate(msg) - So(l4c, ShouldBeNil) - So(l4a, ShouldBeNil) - }) - - Convey("add duplicate detId", func() { - msg.Nonce = 2 - l4c, l4a = f.filtrate(msg) - Convey("add with new nonce", func() { - So(l4c, ShouldBeNil) - So(l4a, ShouldBeNil) - }) - Convey("update with new detId but use duplicate nonce", func() { - msg.Nonce = 2 - ps1.Prices[0] = ptd3 - l4c, l4a := f.filtrate(msg) - So(l4c, ShouldBeNil) - So(l4a, ShouldBeNil) - }) - }) - - Convey("add new detId with new nonce", func() { - msg.Nonce = 2 - ps1.Prices[0] = ptd3 - l4c, l4a = f.filtrate(msg) - ps1.Prices = ps1.Prices[:1] - ps1.Prices[0] = ptd3 - psReturn := []*types.PriceSource{ps1} - So(l4c, ShouldResemble, psReturn) - So(l4a, ShouldResemble, psReturn) - }) - - Convey("add too many nonce", func() { - msg.Nonce = 2 - ps1.Prices[0] = ptd3 - f.filtrate(msg) - - msg.Nonce = 3 - ps1.Prices[0] = ptd4 - l4c, _ = f.filtrate(msg) - So(l4c[0].Prices, ShouldContain, ptd4) - - msg.Nonce = 4 - ps1.Prices[0] = ptd5 - l4c, _ = f.filtrate(msg) - So(l4c, ShouldBeNil) - }) - - Convey("add too many DetIds", func() { - msg.Nonce = 2 - ps1.Prices = []*types.PriceTimeDetID{ptd3, ptd4, ptd5, ptd6} - l4c, l4a = f.filtrate(msg) - So(l4c, ShouldResemble, l4a) - So(l4c[0].Prices, ShouldContain, ptd3) - So(l4c[0].Prices, ShouldContain, ptd4) - So(l4c[0].Prices, ShouldContain, ptd5) - So(l4c[0].Prices, ShouldNotContain, ptd6) - }) - }) -} diff --git a/x/oracle/keeper/aggregator/helper_test.go b/x/oracle/keeper/aggregator/helper_test.go deleted file mode 100644 index f993c6b8e..000000000 --- a/x/oracle/keeper/aggregator/helper_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package aggregator - -import "github.com/ExocoreNetwork/exocore/x/oracle/types" - -func newPTD(detID, price string) *types.PriceTimeDetID { - return &types.PriceTimeDetID{ - Price: price, - Decimal: 1, - Timestamp: "-", - DetID: detID, - } -} - -func newPS(sourceID uint64, prices ...*types.PriceTimeDetID) *types.PriceSource { - return &types.PriceSource{ - SourceID: sourceID, - Prices: prices, - } -} diff --git a/x/oracle/keeper/aggregator/info_test.go b/x/oracle/keeper/aggregator/info_test.go deleted file mode 100644 index dbab8f01c..000000000 --- a/x/oracle/keeper/aggregator/info_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package aggregator - -import ( - "math/big" - - "github.com/ExocoreNetwork/exocore/x/oracle/types" -) - -var ( - one = big.NewInt(1) - zero = big.NewInt(0) - ten = big.NewInt(10) - eleven = big.NewInt(11) - fifteen = big.NewInt(15) - twenty = big.NewInt(20) -) - -var ( - pTD1 = newPTD("1", "10") - pTD2 = newPTD("2", "12") - pTD3 = newPTD("3", "15") - pTD2M = newPTD("2", "11") - pTD3M = newPTD("3", "19") - // 1-10, 2-12 - pS1 = []*types.PriceSource{newPS(1, pTD1, pTD2)} - // 
2-12, 3-1 - pS2 = []*types.PriceSource{newPS(1, pTD3, pTD2)} - // 1-10, 2-11(m) - pS3 = []*types.PriceSource{newPS(1, pTD1, pTD2M)} - // 2-12, 3-19(m) - pS4 = []*types.PriceSource{newPS(1, pTD2, pTD3M)} - // 1-10, 3-19(m) - pS5 = []*types.PriceSource{newPS(1, pTD1, pTD3M)} - - pS6 = []*types.PriceSource{newPS(2, pTD1)} - - // 1-10, 2-12 - pS21 = []*types.PriceSource{newPS(1, pTD1, pTD2), newPS(2, pTD1, pTD3)} - // 2-12, 3-15 - pS22 = []*types.PriceSource{newPS(1, pTD3, pTD2), newPS(2, pTD2, pTD3)} - // 1-10, 2-11(m) - pS23 = []*types.PriceSource{newPS(1, pTD1, pTD2M), newPS(2, pTD2M, pTD1)} - // 2-12, 3-19(m) - pS24 = []*types.PriceSource{newPS(1, pTD2, pTD3M), newPS(2, pTD3, pTD2M)} - // 1-10, 3-19(m) - pS25 = []*types.PriceSource{newPS(1, pTD1, pTD3M), newPS(2, pTD2M, pTD3M)} -) - -var defaultParams = types.Params{ - Chains: []*types.Chain{{Name: "-", Desc: "-"}, {Name: "Ethereum", Desc: "-"}}, - Tokens: []*types.Token{{}, {Name: "eth", ChainID: 1, ContractAddress: "0xabc", Decimal: 18, Active: true, AssetID: ""}}, - Sources: []*types.Source{{}, {Name: "chainLink", Entry: &types.Endpoint{}, Valid: true, Deterministic: true}}, - Rules: []*types.RuleSource{{}, {SourceIDs: []uint64{1}}}, - TokenFeeders: []*types.TokenFeeder{{}, {TokenID: 1, RuleID: 1, StartRoundID: 1, StartBaseBlock: 0, Interval: 10, EndBlock: 0}}, - MaxNonce: 3, - ThresholdA: 2, - ThresholdB: 3, - Mode: types.ConsensusModeASAP, - MaxDetId: 5, -} diff --git a/x/oracle/keeper/aggregator/util.go b/x/oracle/keeper/aggregator/util.go deleted file mode 100644 index 0c1bbc47b..000000000 --- a/x/oracle/keeper/aggregator/util.go +++ /dev/null @@ -1,11 +0,0 @@ -package aggregator - -import "math/big" - -func copyBigInt(i *big.Int) *big.Int { - if i == nil { - return nil - } - - return big.NewInt(0).Set(i) -} diff --git a/x/oracle/keeper/aggregator/worker.go b/x/oracle/keeper/aggregator/worker.go deleted file mode 100644 index a676fd2f4..000000000 --- a/x/oracle/keeper/aggregator/worker.go +++ /dev/null @@ -1,121 +0,0 @@ -package aggregator - -import ( - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// worker is the actual instance used to calculate final price for each tokenFeeder's round. 
Which means, every tokenFeeder corresponds to a specified token, and for that tokenFeeder, each round we use a worker instance to calculate the final price -type worker struct { - sealed bool - price string - decimal int32 - // mainly used for deterministic source data to check conflicts and validation - f *filter - // used to get to consensus on deterministic source's data - c *calculator - // when enough data(exceeds threshold) collected, aggregate to conduct the final price - a *aggregator - ctx *AggregatorContext - // TODO: move outside into context through .ctx - records recordMsg -} - -// recordKey used to retrieve messages from records to evaluate that if a validator report proper price for a specific feederID+sourceID -type recordKey struct { - validator string - feederID uint64 - sourceID uint64 -} - -// recordMsg define wrap the map for fast access to validator's message info -type recordMsg map[recordKey][]*types.PriceTimeDetID - -func newRecordMsg() recordMsg { - return make(map[recordKey][]*types.PriceTimeDetID) -} - -func (r recordMsg) get(validator string, feederID, sourceID uint64) []*types.PriceTimeDetID { - v := r[recordKey{validator, feederID, sourceID}] - return v -} - -func (r recordMsg) check(validator string, feederID, sourceID uint64, price, detID string) (exist, matched bool) { - prices := r.get(validator, feederID, sourceID) - for _, p := range prices { - if p.DetID == detID { - exist = true - if p.Price == price { - matched = true - return - } - } - } - return -} - -func (r recordMsg) set(creator string, feederID uint64, priceSources []*types.PriceSource) { - accAddress, _ := sdk.AccAddressFromBech32(creator) - validator := sdk.ConsAddress(accAddress).String() - for _, price := range priceSources { - r[recordKey{validator, feederID, price.SourceID}] = price.Prices - } -} - -// GetFinalPriceList relies requirement to aggregator inside them to get final price list -// []{feederID, sourceID, detID, price} in asc order of {soruceID} -func (w *worker) getFinalPriceList(feederID uint64) []*types.AggFinalPrice { - return w.a.getFinalPriceList(feederID) -} - -func (w *worker) filtrate(msg *types.MsgCreatePrice) (list4Calculator []*types.PriceSource, list4Aggregator []*types.PriceSource) { - return w.f.filtrate(msg) -} - -func (w *worker) recordMessage(creator string, feederID uint64, priceSources []*types.PriceSource) { - w.records.set(creator, feederID, priceSources) -} - -func (w *worker) check(validator string, feederID, sourceID uint64, price, detID string) (exist, matched bool) { - return w.records.check(validator, feederID, sourceID, price, detID) -} - -func (w *worker) do(msg *types.MsgCreatePrice) []*types.PriceSource { - list4Calculator, list4Aggregator := w.f.filtrate(msg) - if list4Aggregator != nil { - accAddress, _ := sdk.AccAddressFromBech32(msg.Creator) - validator := sdk.ConsAddress(accAddress).String() - power := w.ctx.validatorsPower[validator] - w.a.fillPrice(list4Aggregator, validator, power) - if confirmedRounds := w.c.fillPrice(list4Calculator, validator, power); confirmedRounds != nil { - w.a.confirmDSPrice(confirmedRounds) - } - } - return list4Aggregator -} - -func (w *worker) aggregate() string { - return w.a.aggregate() -} - -// not concurrency safe -func (w *worker) seal() { - if w.sealed { - return - } - w.sealed = true - w.price = w.a.aggregate() -} - -// newWorker new a instance for a tokenFeeder's specific round -func newWorker(feederID uint64, agc *AggregatorContext) *worker { - return &worker{ - f: newFilter(int(common.MaxNonce), 
int(common.MaxDetID)), - c: newCalculator(len(agc.validatorsPower), agc.totalPower), - a: newAggregator(len(agc.validatorsPower), agc.totalPower), - decimal: agc.params.GetTokenInfo(feederID).Decimal, - ctx: agc, - records: newRecordMsg(), - } -} diff --git a/x/oracle/keeper/cache/caches.go b/x/oracle/keeper/cache/caches.go deleted file mode 100644 index 47db24b53..000000000 --- a/x/oracle/keeper/cache/caches.go +++ /dev/null @@ -1,234 +0,0 @@ -package cache - -import ( - "math/big" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -var zeroBig = big.NewInt(0) - -type ( - ItemV map[string]*big.Int - ItemP types.Params - ItemM types.MsgItem -) - -type Cache struct { - msg *cacheMsgs - validators *cacheValidator - params *cacheParams -} - -type cacheMsgs []*ItemM - -// used to track validator change -type cacheValidator struct { - validators map[string]*big.Int - update bool -} - -// used to track params change -type cacheParams struct { - // params types.Params - params *ItemP - update bool -} - -func (c *cacheMsgs) add(item *ItemM) { - *c = append(*c, item) -} - -// remove removes all items with the same feederID -func (c *cacheMsgs) remove(item *ItemM) { - var newCache []*ItemM - for _, msg := range *c { - if msg.FeederID != item.FeederID { - newCache = append(newCache, msg) - } - } - *c = newCache -} - -func (c cacheMsgs) commit(ctx sdk.Context, k common.KeeperOracle) { - block := uint64(ctx.BlockHeight()) - - recentMsgs := types.RecentMsg{ - Block: block, - Msgs: make([]*types.MsgItem, 0), - } - - for _, msg := range c { - msgTmp := types.MsgItem(*msg) - recentMsgs.Msgs = append(recentMsgs.Msgs, &msgTmp) - } - index, _ := k.GetIndexRecentMsg(ctx) - - i := 0 - for ; i < len(index.Index); i++ { - b := index.Index[i] - if b > block-uint64(common.MaxNonce) { - break - } - k.RemoveRecentMsg(ctx, b) - } - index.Index = index.Index[i:] - - k.SetRecentMsg(ctx, recentMsgs) - - index.Index = append(index.Index, block) - k.SetIndexRecentMsg(ctx, index) -} - -func (c *cacheValidator) add(validators map[string]*big.Int) { - for operator, newPower := range validators { - if power, ok := c.validators[operator]; ok { - if newPower.Cmp(zeroBig) == 0 { - delete(c.validators, operator) - c.update = true - } else if power.Cmp(newPower) != 0 { - c.validators[operator].Set(newPower) - c.update = true - } - } else { - c.update = true - np := *newPower - c.validators[operator] = &np - } - } -} - -func (c *cacheValidator) commit(ctx sdk.Context, k common.KeeperOracle) { - block := uint64(ctx.BlockHeight()) - k.SetValidatorUpdateBlock(ctx, types.ValidatorUpdateBlock{Block: block}) -} - -func (c *cacheParams) add(p ItemP) { - // params' update is triggered when params is actually updated, so no need to do comparison here, just udpate and mark the flag - // TODO: add comparison check, that's something should be done for validation - c.params = &p - c.update = true -} - -func (c *cacheParams) commit(ctx sdk.Context, k common.KeeperOracle) { - block := uint64(ctx.BlockHeight()) - index, _ := k.GetIndexRecentParams(ctx) - i := 0 - for ; i < len(index.Index); i++ { - b := index.Index[i] - if b >= block-uint64(common.MaxNonce) { - break - } - k.RemoveRecentParams(ctx, b) - } - if i > 0 && i == len(index.Index) { - i-- - } - index.Index = index.Index[i:] - // remove and append for KVStore - index.Index = append(index.Index, block) - k.SetIndexRecentParams(ctx, index) - - p := types.Params(*c.params) - 
k.SetRecentParams(ctx, types.RecentParams{ - Block: block, - Params: &p, - }) -} - -// memory cache -func (c *Cache) AddCache(i any) { - switch item := i.(type) { - case *ItemM: - c.msg.add(item) - case ItemP: - c.params.add(item) - case ItemV: - c.validators.add(item) - default: - panic("no other types are support") - } -} - -// RemoveCache removes all cached msgs with the same feederID -func (c *Cache) RemoveCache(i any) { - if item, isItemM := i.(*ItemM); isItemM { - c.msg.remove(item) - } -} - -func (c *Cache) GetCache(i any) bool { - switch item := i.(type) { - case ItemV: - if item == nil { - return false - } - for addr, power := range c.validators.validators { - item[addr] = power - } - return c.validators.update - case *ItemP: - if item == nil { - return false - } - *item = *c.params.params - return c.params.update - case *([]*ItemM): - if item == nil { - return false - } - *item = *c.msg - return len(*c.msg) > 0 - default: - return false - } -} - -// SkipCommit skip real commit by setting the updage flag to false -func (c *Cache) SkipCommit() { - c.validators.update = false - c.params.update = false -} - -// CommitCache commits the cache to the KVStore -func (c *Cache) CommitCache(ctx sdk.Context, reset bool, k common.KeeperOracle) (msgUpdated, validatorsUpdated, paramsUpdated bool) { - if len(*(c.msg)) > 0 { - c.msg.commit(ctx, k) - *(c.msg) = make([]*ItemM, 0) - msgUpdated = true - } - - if c.validators.update { - c.validators.commit(ctx, k) - c.validators.update = false - validatorsUpdated = true - } - - if c.params.update { - c.params.commit(ctx, k) - c.params.update = false - paramsUpdated = true - } - if reset { - c.ResetCaches() - } - return -} - -func (c *Cache) ResetCaches() { - *c = *(NewCache()) -} - -func NewCache() *Cache { - return &Cache{ - msg: new(cacheMsgs), - validators: &cacheValidator{ - validators: make(map[string]*big.Int), - }, - params: &cacheParams{ - params: &ItemP{}, - }, - } -} diff --git a/x/oracle/keeper/cache/caches_test.go b/x/oracle/keeper/cache/caches_test.go deleted file mode 100644 index 2d9738781..000000000 --- a/x/oracle/keeper/cache/caches_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package cache - -import ( - "math/big" - "testing" - - "github.com/ExocoreNetwork/exocore/x/oracle/types" - . 
"github.com/smartystreets/goconvey/convey" - // "go.uber.org/mock/gomock" -) - -func TestCache(t *testing.T) { - c := NewCache() - p := defaultParams - pWrapped := ItemP(p) - - // ctrl := gomock.NewController(t) - // defer ctrl.Finish() - // ko := common.NewMockKeeperOracle(ctrl) - // c.AddCache(CacheItemP(&pWrapped), ko) - - Convey("test cache", t, func() { - Convey("add pramams item", func() { - c.AddCache(pWrapped) - pReturn := &ItemP{} - c.GetCache(pReturn) - So(*pReturn, ShouldResemble, pWrapped) - }) - - Convey("add validatorPower item", func() { - validatorPowers := map[string]*big.Int{ - "v1": big.NewInt(100), - "v2": big.NewInt(109), - "v3": big.NewInt(119), - } - c.AddCache(ItemV(validatorPowers)) - vpReturn := make(map[string]*big.Int) - Convey("for empty cache", func() { - c.GetCache(ItemV(vpReturn)) - So(vpReturn, ShouldResemble, validatorPowers) - }) - Convey("then update validatorPower item for this cache", func() { - validaotrPowers := map[string]*big.Int{ - // add v5 - "v5": big.NewInt(123), - // remove v1 - "v1": big.NewInt(0), - // update v2 - "v2": big.NewInt(199), - } - c.AddCache(ItemV(validaotrPowers)) - c.GetCache(ItemV(vpReturn)) - So(vpReturn, ShouldNotContainKey, "v1") - So(vpReturn, ShouldContainKey, "v5") - So(vpReturn["v2"], ShouldResemble, big.NewInt(199)) - }) - }) - - Convey("add msg item", func() { - msgItems := []*ItemM{ - { - FeederID: 1, - PSources: []*types.PriceSource{ - { - SourceID: 1, - Prices: []*types.PriceTimeDetID{ - {Price: "600000", Decimal: 1, Timestamp: "-", DetID: "1"}, {Price: "620000", Decimal: 1, Timestamp: "-", DetID: "2"}, - }, - }, - }, - Validator: "v1", - }, - { - FeederID: 1, - PSources: []*types.PriceSource{ - {SourceID: 1, Prices: []*types.PriceTimeDetID{{Price: "600000", Decimal: 1, Timestamp: "-", DetID: "4"}, {Price: "620000", Decimal: 1, Timestamp: "-", DetID: "3"}}}, - }, - Validator: "v1", - }, - { - FeederID: 2, - PSources: []*types.PriceSource{{SourceID: 1, Prices: []*types.PriceTimeDetID{{Price: "30000", Decimal: 1, Timestamp: "-", DetID: "4"}, {Price: "32000", Decimal: 1, Timestamp: "-", DetID: "3"}}}}, - Validator: "v2", - }, - } - c.AddCache(msgItems[0]) - msgItemsReturn := make([]*ItemM, 0, 3) - Convey("add single item", func() { - c.GetCache(&msgItemsReturn) - So(msgItemsReturn, ShouldContain, msgItems[0]) - }) - Convey("add more items", func() { - c.AddCache(msgItems[1]) - c.AddCache(msgItems[2]) - - c.GetCache(&msgItemsReturn) - So(msgItemsReturn, ShouldContain, msgItems[0]) - So(msgItemsReturn, ShouldContain, msgItems[2]) - }) - Convey("remove two items with same feederID", func() { - c.AddCache(msgItems[1]) - c.AddCache(msgItems[2]) - c.RemoveCache(msgItems[0]) - - c.GetCache(&msgItemsReturn) - So(msgItemsReturn, ShouldContain, msgItems[2]) - So(msgItemsReturn, ShouldNotContain, msgItems[0]) - So(msgItemsReturn, ShouldNotContain, msgItems[1]) - }) - }) - }) -} diff --git a/x/oracle/keeper/cache/info_test.go b/x/oracle/keeper/cache/info_test.go deleted file mode 100644 index 8514b8a68..000000000 --- a/x/oracle/keeper/cache/info_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package cache - -import "github.com/ExocoreNetwork/exocore/x/oracle/types" - -var defaultParams = types.Params{ - Chains: []*types.Chain{{Name: "-", Desc: "-"}, {Name: "Ethereum", Desc: "-"}}, - Tokens: []*types.Token{{}, {Name: "eth", ChainID: 1, ContractAddress: "0xabc", Decimal: 18, Active: true}}, - Sources: []*types.Source{{}, {Name: "chainLink", Entry: &types.Endpoint{}, Valid: true, Deterministic: true}}, - Rules: []*types.RuleSource{{}, 
{SourceIDs: []uint64{1}}}, - TokenFeeders: []*types.TokenFeeder{{}, {TokenID: 1, RuleID: 1, StartRoundID: 1, StartBaseBlock: 0, Interval: 10, EndBlock: 0}}, -} diff --git a/x/oracle/keeper/common/common_test.go b/x/oracle/keeper/common/common_test.go deleted file mode 100644 index 08944dcaf..000000000 --- a/x/oracle/keeper/common/common_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package common - -import ( - "testing" - - "cosmossdk.io/math" - sdk "github.com/cosmos/cosmos-sdk/types" - . "github.com/smartystreets/goconvey/convey" - "go.uber.org/mock/gomock" -) - -//go:generate mockgen -destination mock_keeper_test.go -package common github.com/ExocoreNetwork/exocore/x/oracle/keeper/common KeeperOracle - -//go:generate mockgen -destination mock_validator_test.go -package common github.com/cosmos/cosmos-sdk/x/staking/types ValidatorI - -func TestMock(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ko := NewMockKeeperOracle(ctrl) - - ko.EXPECT().GetLastTotalPower(gomock.Any()).Return(math.NewInt(99)) - - x := ko.GetLastTotalPower(sdk.Context{}) - _ = x - - Convey("mock oracle keeper", t, func() { - Convey("GetLastTotalPower", func() { So(x, ShouldResemble, math.NewInt(99)) }) - }) -} diff --git a/x/oracle/keeper/common/expected_keepers.go b/x/oracle/keeper/common/expected_keepers.go index f668d139b..d5bebe7de 100644 --- a/x/oracle/keeper/common/expected_keepers.go +++ b/x/oracle/keeper/common/expected_keepers.go @@ -1,11 +1,14 @@ package common import ( + "time" + sdkmath "cosmossdk.io/math" dogfoodkeeper "github.com/ExocoreNetwork/exocore/x/dogfood/keeper" dogfoodtypes "github.com/ExocoreNetwork/exocore/x/dogfood/types" "github.com/ExocoreNetwork/exocore/x/oracle/types" abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/log" sdk "github.com/cosmos/cosmos-sdk/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" ) @@ -14,12 +17,36 @@ type Price struct { Value sdkmath.Int Decimal uint8 } - +type SlashingKeeper interface { + JailUntil(sdk.Context, sdk.ConsAddress, time.Time) +} type KeeperOracle interface { KeeperDogfood - + SlashingKeeper + + Logger(ctx sdk.Context) log.Logger + AddZeroNonceItemWithFeederIDsForValidators(ctx sdk.Context, feederIDs []uint64, validators []string) + InitValidatorReportInfo(ctx sdk.Context, validator string, height int64) + ClearAllValidatorReportInfo(ctx sdk.Context) + ClearAllValidatorMissedRoundBitArray(ctx sdk.Context) + GrowRoundID(ctx sdk.Context, tokenID uint64) (price string, roundID uint64) + AppendPriceTR(ctx sdk.Context, tokenID uint64, priceTR types.PriceTimeRound, detID string) bool + GetValidatorReportInfo(ctx sdk.Context, validator string) (info types.ValidatorReportInfo, found bool) + GetMaliciousJailDuration(ctx sdk.Context) (res time.Duration) + ClearValidatorMissedRoundBitArray(ctx sdk.Context, validator string) + GetReportedRoundsWindow(ctx sdk.Context) int64 + GetValidatorMissedRoundBitArray(ctx sdk.Context, validator string, index uint64) bool + SetValidatorMissedRoundBitArray(ctx sdk.Context, validator string, index uint64, missed bool) + GetMinReportedPerWindow(ctx sdk.Context) int64 + GetMissJailDuration(ctx sdk.Context) (res time.Duration) + SetValidatorReportInfo(ctx sdk.Context, validator string, info types.ValidatorReportInfo) + GetSlashFractionMalicious(ctx sdk.Context) (res sdk.Dec) + SetValidatorUpdateForCache(sdk.Context, types.ValidatorUpdateBlock) + SetParamsForCache(sdk.Context, types.RecentParams) + SetMsgItemsForCache(sdk.Context, types.RecentMsg) + 
GetRecentParamsWithinMaxNonce(ctx sdk.Context) (recentParamsList []*types.RecentParams, prev, latest types.RecentParams) + GetAllRecentMsg(ctx sdk.Context) (list []types.RecentMsg) GetParams(sdk.Context) types.Params - GetIndexRecentMsg(sdk.Context) (types.IndexRecentMsg, bool) GetAllRecentMsgAsMap(sdk.Context) map[int64][]*types.MsgItem @@ -34,15 +61,12 @@ type KeeperOracle interface { SetIndexRecentParams(sdk.Context, types.IndexRecentParams) SetRecentParams(sdk.Context, types.RecentParams) - SetValidatorUpdateBlock(sdk.Context, types.ValidatorUpdateBlock) - RemoveRecentParams(sdk.Context, uint64) RemoveRecentMsg(sdk.Context, uint64) RemoveNonceWithValidator(ctx sdk.Context, validator string) - RemoveNonceWithValidatorAndFeederID(ctx sdk.Context, validator string, feederID uint64) bool - RemoveNonceWithFeederIDForValidators(ctx sdk.Context, feederID uint64, validators []string) - RemoveNonceWithFeederIDForAll(ctx sdk.Context, feederID uint64) + RemoveNonceWithFeederIDsForValidators(ctx sdk.Context, feederIDs []uint64, validators []string) + RemoveNonceWithFeederIDsForAll(ctx sdk.Context, feederID []uint64) SetNonce(ctx sdk.Context, nonce types.ValidatorNonce) GetSpecifiedAssetsPrice(ctx sdk.Context, assetID string) (types.Price, error) diff --git a/x/oracle/keeper/common/mock_keeper_test.go b/x/oracle/keeper/common/mock_keeper_test.go deleted file mode 100644 index 0e6f96e3e..000000000 --- a/x/oracle/keeper/common/mock_keeper_test.go +++ /dev/null @@ -1,270 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ExocoreNetwork/exocore/x/oracle/keeper/common (interfaces: KeeperOracle) -// -// Generated by this command: -// -// mockgen -destination mock_keeper_test.go -package common github.com/ExocoreNetwork/exocore/x/oracle/keeper/common KeeperOracle -// - -// Package common is a generated GoMock package. -package common - -import ( - reflect "reflect" - - math "cosmossdk.io/math" - types "github.com/ExocoreNetwork/exocore/x/oracle/types" - types0 "github.com/cometbft/cometbft/abci/types" - types1 "github.com/cosmos/cosmos-sdk/types" - types2 "github.com/cosmos/cosmos-sdk/x/staking/types" - gomock "go.uber.org/mock/gomock" -) - -// MockKeeperOracle is a mock of KeeperOracle interface. -type MockKeeperOracle struct { - ctrl *gomock.Controller - recorder *MockKeeperOracleMockRecorder -} - -// MockKeeperOracleMockRecorder is the mock recorder for MockKeeperOracle. -type MockKeeperOracleMockRecorder struct { - mock *MockKeeperOracle -} - -// NewMockKeeperOracle creates a new mock instance. -func NewMockKeeperOracle(ctrl *gomock.Controller) *MockKeeperOracle { - mock := &MockKeeperOracle{ctrl: ctrl} - mock.recorder = &MockKeeperOracleMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockKeeperOracle) EXPECT() *MockKeeperOracleMockRecorder { - return m.recorder -} - -// GetAllRecentMsgAsMap mocks base method. -func (m *MockKeeperOracle) GetAllRecentMsgAsMap(arg0 types1.Context) map[int64][]*types.MsgItem { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllRecentMsgAsMap", arg0) - ret0, _ := ret[0].(map[int64][]*types.MsgItem) - return ret0 -} - -// GetAllRecentMsgAsMap indicates an expected call of GetAllRecentMsgAsMap. 
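To make the report-tracking methods added to KeeperOracle above easier to follow, here is a minimal, hypothetical sketch of how an end blocker could combine them to jail a validator that missed too many rounds in the reporting window. Only the method signatures come from this patch; the helper name, its placement, and the exact jail condition are illustrative assumptions.

package keeper // hypothetical placement; illustration only, not part of this patch

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/ExocoreNetwork/exocore/x/oracle/keeper/common"
)

// maybeJailForMissedRounds is NOT part of this patch. It only shows how the
// window, bit-array and jail methods on KeeperOracle could fit together.
func maybeJailForMissedRounds(ctx sdk.Context, k common.KeeperOracle, validator string, consAddr sdk.ConsAddress) {
	window := k.GetReportedRoundsWindow(ctx)      // size of the sliding window, in rounds
	minReported := k.GetMinReportedPerWindow(ctx) // quotes a validator must have sent in that window
	maxMissed := window - minReported

	// Count the rounds flagged as missed in the per-validator bit array.
	missed := int64(0)
	for i := int64(0); i < window; i++ {
		if k.GetValidatorMissedRoundBitArray(ctx, validator, uint64(i)) {
			missed++
		}
	}

	if missed > maxMissed {
		// Downtime-style punishment: jail for the configured duration and reset the window.
		k.JailUntil(ctx, consAddr, ctx.BlockTime().Add(k.GetMissJailDuration(ctx)))
		k.ClearValidatorMissedRoundBitArray(ctx, validator)
	}
}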
-func (mr *MockKeeperOracleMockRecorder) GetAllRecentMsgAsMap(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllRecentMsgAsMap", reflect.TypeOf((*MockKeeperOracle)(nil).GetAllRecentMsgAsMap), arg0) -} - -// GetAllRecentParamsAsMap mocks base method. -func (m *MockKeeperOracle) GetAllRecentParamsAsMap(arg0 types1.Context) map[uint64]*types.Params { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllRecentParamsAsMap", arg0) - ret0, _ := ret[0].(map[uint64]*types.Params) - return ret0 -} - -// GetAllRecentParamsAsMap indicates an expected call of GetAllRecentParamsAsMap. -func (mr *MockKeeperOracleMockRecorder) GetAllRecentParamsAsMap(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllRecentParamsAsMap", reflect.TypeOf((*MockKeeperOracle)(nil).GetAllRecentParamsAsMap), arg0) -} - -// GetIndexRecentMsg mocks base method. -func (m *MockKeeperOracle) GetIndexRecentMsg(arg0 types1.Context) (types.IndexRecentMsg, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetIndexRecentMsg", arg0) - ret0, _ := ret[0].(types.IndexRecentMsg) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetIndexRecentMsg indicates an expected call of GetIndexRecentMsg. -func (mr *MockKeeperOracleMockRecorder) GetIndexRecentMsg(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIndexRecentMsg", reflect.TypeOf((*MockKeeperOracle)(nil).GetIndexRecentMsg), arg0) -} - -// GetIndexRecentParams mocks base method. -func (m *MockKeeperOracle) GetIndexRecentParams(arg0 types1.Context) (types.IndexRecentParams, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetIndexRecentParams", arg0) - ret0, _ := ret[0].(types.IndexRecentParams) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetIndexRecentParams indicates an expected call of GetIndexRecentParams. -func (mr *MockKeeperOracleMockRecorder) GetIndexRecentParams(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIndexRecentParams", reflect.TypeOf((*MockKeeperOracle)(nil).GetIndexRecentParams), arg0) -} - -// GetLastTotalPower mocks base method. -func (m *MockKeeperOracle) GetLastTotalPower(arg0 types1.Context) math.Int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLastTotalPower", arg0) - ret0, _ := ret[0].(math.Int) - return ret0 -} - -// GetLastTotalPower indicates an expected call of GetLastTotalPower. -func (mr *MockKeeperOracleMockRecorder) GetLastTotalPower(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastTotalPower", reflect.TypeOf((*MockKeeperOracle)(nil).GetLastTotalPower), arg0) -} - -// GetParams mocks base method. -func (m *MockKeeperOracle) GetParams(arg0 types1.Context) types.Params { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetParams", arg0) - ret0, _ := ret[0].(types.Params) - return ret0 -} - -// GetParams indicates an expected call of GetParams. -func (mr *MockKeeperOracleMockRecorder) GetParams(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetParams", reflect.TypeOf((*MockKeeperOracle)(nil).GetParams), arg0) -} - -// GetValidatorByConsAddr mocks base method. 
-func (m *MockKeeperOracle) GetValidatorByConsAddr(arg0 types1.Context, arg1 types1.ConsAddress) (types2.Validator, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidatorByConsAddr", arg0, arg1) - ret0, _ := ret[0].(types2.Validator) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetValidatorByConsAddr indicates an expected call of GetValidatorByConsAddr. -func (mr *MockKeeperOracleMockRecorder) GetValidatorByConsAddr(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorByConsAddr", reflect.TypeOf((*MockKeeperOracle)(nil).GetValidatorByConsAddr), arg0, arg1) -} - -// GetValidatorUpdateBlock mocks base method. -func (m *MockKeeperOracle) GetValidatorUpdateBlock(arg0 types1.Context) (types.ValidatorUpdateBlock, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidatorUpdateBlock", arg0) - ret0, _ := ret[0].(types.ValidatorUpdateBlock) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetValidatorUpdateBlock indicates an expected call of GetValidatorUpdateBlock. -func (mr *MockKeeperOracleMockRecorder) GetValidatorUpdateBlock(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorUpdateBlock", reflect.TypeOf((*MockKeeperOracle)(nil).GetValidatorUpdateBlock), arg0) -} - -// GetValidatorUpdates mocks base method. -func (m *MockKeeperOracle) GetValidatorUpdates(arg0 types1.Context) []types0.ValidatorUpdate { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidatorUpdates", arg0) - ret0, _ := ret[0].([]types0.ValidatorUpdate) - return ret0 -} - -// GetValidatorUpdates indicates an expected call of GetValidatorUpdates. -func (mr *MockKeeperOracleMockRecorder) GetValidatorUpdates(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorUpdates", reflect.TypeOf((*MockKeeperOracle)(nil).GetValidatorUpdates), arg0) -} - -// IterateBondedValidatorsByPower mocks base method. -func (m *MockKeeperOracle) IterateBondedValidatorsByPower(arg0 types1.Context, arg1 func(int64, types2.ValidatorI) bool) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "IterateBondedValidatorsByPower", arg0, arg1) -} - -// IterateBondedValidatorsByPower indicates an expected call of IterateBondedValidatorsByPower. -func (mr *MockKeeperOracleMockRecorder) IterateBondedValidatorsByPower(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IterateBondedValidatorsByPower", reflect.TypeOf((*MockKeeperOracle)(nil).IterateBondedValidatorsByPower), arg0, arg1) -} - -// RemoveRecentMsg mocks base method. -func (m *MockKeeperOracle) RemoveRecentMsg(arg0 types1.Context, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveRecentMsg", arg0, arg1) -} - -// RemoveRecentMsg indicates an expected call of RemoveRecentMsg. -func (mr *MockKeeperOracleMockRecorder) RemoveRecentMsg(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRecentMsg", reflect.TypeOf((*MockKeeperOracle)(nil).RemoveRecentMsg), arg0, arg1) -} - -// RemoveRecentParams mocks base method. -func (m *MockKeeperOracle) RemoveRecentParams(arg0 types1.Context, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveRecentParams", arg0, arg1) -} - -// RemoveRecentParams indicates an expected call of RemoveRecentParams. 
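The IterateBondedValidatorsByPower and GetValidatorByConsAddr methods mocked here are the keeper's view of validator voting power. As a hedged illustration (the helper below is hypothetical, not part of this patch), a validator-to-power map of the shape the deleted cacheValidator tracked could be assembled like this:

package keeper // hypothetical placement; illustration only

import (
	"math/big"

	sdk "github.com/cosmos/cosmos-sdk/types"
	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"

	"github.com/ExocoreNetwork/exocore/x/oracle/keeper/common"
)

// buildPowerMap walks the bonded set and records each validator's consensus
// power keyed by consensus address, mirroring the map[string]*big.Int shape
// used by the (now removed) cacheValidator. Illustrative only.
func buildPowerMap(ctx sdk.Context, k common.KeeperOracle) map[string]*big.Int {
	powers := make(map[string]*big.Int)
	k.IterateBondedValidatorsByPower(ctx, func(_ int64, v stakingtypes.ValidatorI) bool {
		consAddr, err := v.GetConsAddr()
		if err != nil {
			return true // stop on error; real code would log and handle this
		}
		powers[consAddr.String()] = big.NewInt(v.GetConsensusPower(sdk.DefaultPowerReduction))
		return false // false keeps the iteration going
	})
	return powers
}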
-func (mr *MockKeeperOracleMockRecorder) RemoveRecentParams(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRecentParams", reflect.TypeOf((*MockKeeperOracle)(nil).RemoveRecentParams), arg0, arg1) -} - -// SetIndexRecentMsg mocks base method. -func (m *MockKeeperOracle) SetIndexRecentMsg(arg0 types1.Context, arg1 types.IndexRecentMsg) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetIndexRecentMsg", arg0, arg1) -} - -// SetIndexRecentMsg indicates an expected call of SetIndexRecentMsg. -func (mr *MockKeeperOracleMockRecorder) SetIndexRecentMsg(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIndexRecentMsg", reflect.TypeOf((*MockKeeperOracle)(nil).SetIndexRecentMsg), arg0, arg1) -} - -// SetIndexRecentParams mocks base method. -func (m *MockKeeperOracle) SetIndexRecentParams(arg0 types1.Context, arg1 types.IndexRecentParams) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetIndexRecentParams", arg0, arg1) -} - -// SetIndexRecentParams indicates an expected call of SetIndexRecentParams. -func (mr *MockKeeperOracleMockRecorder) SetIndexRecentParams(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIndexRecentParams", reflect.TypeOf((*MockKeeperOracle)(nil).SetIndexRecentParams), arg0, arg1) -} - -// SetRecentMsg mocks base method. -func (m *MockKeeperOracle) SetRecentMsg(arg0 types1.Context, arg1 types.RecentMsg) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetRecentMsg", arg0, arg1) -} - -// SetRecentMsg indicates an expected call of SetRecentMsg. -func (mr *MockKeeperOracleMockRecorder) SetRecentMsg(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRecentMsg", reflect.TypeOf((*MockKeeperOracle)(nil).SetRecentMsg), arg0, arg1) -} - -// SetRecentParams mocks base method. -func (m *MockKeeperOracle) SetRecentParams(arg0 types1.Context, arg1 types.RecentParams) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetRecentParams", arg0, arg1) -} - -// SetRecentParams indicates an expected call of SetRecentParams. -func (mr *MockKeeperOracleMockRecorder) SetRecentParams(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRecentParams", reflect.TypeOf((*MockKeeperOracle)(nil).SetRecentParams), arg0, arg1) -} - -// SetValidatorUpdateBlock mocks base method. -func (m *MockKeeperOracle) SetValidatorUpdateBlock(arg0 types1.Context, arg1 types.ValidatorUpdateBlock) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetValidatorUpdateBlock", arg0, arg1) -} - -// SetValidatorUpdateBlock indicates an expected call of SetValidatorUpdateBlock. -func (mr *MockKeeperOracleMockRecorder) SetValidatorUpdateBlock(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorUpdateBlock", reflect.TypeOf((*MockKeeperOracle)(nil).SetValidatorUpdateBlock), arg0, arg1) -} diff --git a/x/oracle/keeper/common/mock_validator_test.go b/x/oracle/keeper/common/mock_validator_test.go deleted file mode 100644 index 6e84bfe0a..000000000 --- a/x/oracle/keeper/common/mock_validator_test.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. 
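Both generated files removed here, MockKeeperOracle above and MockValidatorI below, were driven through the usual gomock pattern shown in the deleted common_test.go. A purely illustrative sketch of how the recent-message bookkeeping performed by cacheMsgs.commit could have been exercised against the mock (the test name and expectations are assumptions):

package common // mirrors the deleted test files; illustration only

import (
	"testing"

	"github.com/ExocoreNetwork/exocore/x/oracle/types"
	sdk "github.com/cosmos/cosmos-sdk/types"
	gomock "go.uber.org/mock/gomock"
)

// TestRecentMsgStoreWiring is hypothetical: it only demonstrates the
// expectation wiring, calling the mock directly instead of real cache code.
func TestRecentMsgStoreWiring(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	ko := NewMockKeeperOracle(ctrl)

	// Expectations mirror what cacheMsgs.commit did: read the index, write the
	// new batch for the current block, then store the updated index.
	ko.EXPECT().GetIndexRecentMsg(gomock.Any()).Return(types.IndexRecentMsg{Index: []uint64{9}}, true)
	ko.EXPECT().SetRecentMsg(gomock.Any(), gomock.Any())
	ko.EXPECT().SetIndexRecentMsg(gomock.Any(), gomock.Any())

	ctx := sdk.Context{}
	index, found := ko.GetIndexRecentMsg(ctx)
	if !found || len(index.Index) != 1 {
		t.Fatalf("unexpected index: %v (found=%v)", index, found)
	}
	ko.SetRecentMsg(ctx, types.RecentMsg{Block: 10})
	ko.SetIndexRecentMsg(ctx, types.IndexRecentMsg{Index: append(index.Index, 10)})
}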
-// Source: github.com/cosmos/cosmos-sdk/x/staking/types (interfaces: ValidatorI) -// -// Generated by this command: -// -// mockgen -destination mock_validator_test.go -package common github.com/cosmos/cosmos-sdk/x/staking/types ValidatorI -// - -// Package common is a generated GoMock package. -package common - -import ( - reflect "reflect" - - math "cosmossdk.io/math" - crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - types "github.com/cosmos/cosmos-sdk/crypto/types" - types0 "github.com/cosmos/cosmos-sdk/types" - types1 "github.com/cosmos/cosmos-sdk/x/staking/types" - gomock "go.uber.org/mock/gomock" -) - -// MockValidatorI is a mock of ValidatorI interface. -type MockValidatorI struct { - ctrl *gomock.Controller - recorder *MockValidatorIMockRecorder -} - -// MockValidatorIMockRecorder is the mock recorder for MockValidatorI. -type MockValidatorIMockRecorder struct { - mock *MockValidatorI -} - -// NewMockValidatorI creates a new mock instance. -func NewMockValidatorI(ctrl *gomock.Controller) *MockValidatorI { - mock := &MockValidatorI{ctrl: ctrl} - mock.recorder = &MockValidatorIMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockValidatorI) EXPECT() *MockValidatorIMockRecorder { - return m.recorder -} - -// ConsPubKey mocks base method. -func (m *MockValidatorI) ConsPubKey() (types.PubKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ConsPubKey") - ret0, _ := ret[0].(types.PubKey) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ConsPubKey indicates an expected call of ConsPubKey. -func (mr *MockValidatorIMockRecorder) ConsPubKey() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConsPubKey", reflect.TypeOf((*MockValidatorI)(nil).ConsPubKey)) -} - -// GetBondedTokens mocks base method. -func (m *MockValidatorI) GetBondedTokens() math.Int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBondedTokens") - ret0, _ := ret[0].(math.Int) - return ret0 -} - -// GetBondedTokens indicates an expected call of GetBondedTokens. -func (mr *MockValidatorIMockRecorder) GetBondedTokens() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBondedTokens", reflect.TypeOf((*MockValidatorI)(nil).GetBondedTokens)) -} - -// GetCommission mocks base method. -func (m *MockValidatorI) GetCommission() math.LegacyDec { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCommission") - ret0, _ := ret[0].(math.LegacyDec) - return ret0 -} - -// GetCommission indicates an expected call of GetCommission. -func (mr *MockValidatorIMockRecorder) GetCommission() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommission", reflect.TypeOf((*MockValidatorI)(nil).GetCommission)) -} - -// GetConsAddr mocks base method. -func (m *MockValidatorI) GetConsAddr() (types0.ConsAddress, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetConsAddr") - ret0, _ := ret[0].(types0.ConsAddress) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetConsAddr indicates an expected call of GetConsAddr. -func (mr *MockValidatorIMockRecorder) GetConsAddr() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConsAddr", reflect.TypeOf((*MockValidatorI)(nil).GetConsAddr)) -} - -// GetConsensusPower mocks base method. 
-func (m *MockValidatorI) GetConsensusPower(arg0 math.Int) int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetConsensusPower", arg0) - ret0, _ := ret[0].(int64) - return ret0 -} - -// GetConsensusPower indicates an expected call of GetConsensusPower. -func (mr *MockValidatorIMockRecorder) GetConsensusPower(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConsensusPower", reflect.TypeOf((*MockValidatorI)(nil).GetConsensusPower), arg0) -} - -// GetDelegatorShares mocks base method. -func (m *MockValidatorI) GetDelegatorShares() math.LegacyDec { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDelegatorShares") - ret0, _ := ret[0].(math.LegacyDec) - return ret0 -} - -// GetDelegatorShares indicates an expected call of GetDelegatorShares. -func (mr *MockValidatorIMockRecorder) GetDelegatorShares() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegatorShares", reflect.TypeOf((*MockValidatorI)(nil).GetDelegatorShares)) -} - -// GetMinSelfDelegation mocks base method. -func (m *MockValidatorI) GetMinSelfDelegation() math.Int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMinSelfDelegation") - ret0, _ := ret[0].(math.Int) - return ret0 -} - -// GetMinSelfDelegation indicates an expected call of GetMinSelfDelegation. -func (mr *MockValidatorIMockRecorder) GetMinSelfDelegation() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMinSelfDelegation", reflect.TypeOf((*MockValidatorI)(nil).GetMinSelfDelegation)) -} - -// GetMoniker mocks base method. -func (m *MockValidatorI) GetMoniker() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMoniker") - ret0, _ := ret[0].(string) - return ret0 -} - -// GetMoniker indicates an expected call of GetMoniker. -func (mr *MockValidatorIMockRecorder) GetMoniker() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMoniker", reflect.TypeOf((*MockValidatorI)(nil).GetMoniker)) -} - -// GetOperator mocks base method. -func (m *MockValidatorI) GetOperator() types0.ValAddress { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetOperator") - ret0, _ := ret[0].(types0.ValAddress) - return ret0 -} - -// GetOperator indicates an expected call of GetOperator. -func (mr *MockValidatorIMockRecorder) GetOperator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOperator", reflect.TypeOf((*MockValidatorI)(nil).GetOperator)) -} - -// GetStatus mocks base method. -func (m *MockValidatorI) GetStatus() types1.BondStatus { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatus") - ret0, _ := ret[0].(types1.BondStatus) - return ret0 -} - -// GetStatus indicates an expected call of GetStatus. -func (mr *MockValidatorIMockRecorder) GetStatus() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatus", reflect.TypeOf((*MockValidatorI)(nil).GetStatus)) -} - -// GetTokens mocks base method. -func (m *MockValidatorI) GetTokens() math.Int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTokens") - ret0, _ := ret[0].(math.Int) - return ret0 -} - -// GetTokens indicates an expected call of GetTokens. -func (mr *MockValidatorIMockRecorder) GetTokens() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTokens", reflect.TypeOf((*MockValidatorI)(nil).GetTokens)) -} - -// IsBonded mocks base method. 
-func (m *MockValidatorI) IsBonded() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsBonded") - ret0, _ := ret[0].(bool) - return ret0 -} - -// IsBonded indicates an expected call of IsBonded. -func (mr *MockValidatorIMockRecorder) IsBonded() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsBonded", reflect.TypeOf((*MockValidatorI)(nil).IsBonded)) -} - -// IsJailed mocks base method. -func (m *MockValidatorI) IsJailed() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsJailed") - ret0, _ := ret[0].(bool) - return ret0 -} - -// IsJailed indicates an expected call of IsJailed. -func (mr *MockValidatorIMockRecorder) IsJailed() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsJailed", reflect.TypeOf((*MockValidatorI)(nil).IsJailed)) -} - -// IsUnbonded mocks base method. -func (m *MockValidatorI) IsUnbonded() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsUnbonded") - ret0, _ := ret[0].(bool) - return ret0 -} - -// IsUnbonded indicates an expected call of IsUnbonded. -func (mr *MockValidatorIMockRecorder) IsUnbonded() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnbonded", reflect.TypeOf((*MockValidatorI)(nil).IsUnbonded)) -} - -// IsUnbonding mocks base method. -func (m *MockValidatorI) IsUnbonding() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsUnbonding") - ret0, _ := ret[0].(bool) - return ret0 -} - -// IsUnbonding indicates an expected call of IsUnbonding. -func (mr *MockValidatorIMockRecorder) IsUnbonding() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnbonding", reflect.TypeOf((*MockValidatorI)(nil).IsUnbonding)) -} - -// SharesFromTokens mocks base method. -func (m *MockValidatorI) SharesFromTokens(arg0 math.Int) (math.LegacyDec, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SharesFromTokens", arg0) - ret0, _ := ret[0].(math.LegacyDec) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SharesFromTokens indicates an expected call of SharesFromTokens. -func (mr *MockValidatorIMockRecorder) SharesFromTokens(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SharesFromTokens", reflect.TypeOf((*MockValidatorI)(nil).SharesFromTokens), arg0) -} - -// SharesFromTokensTruncated mocks base method. -func (m *MockValidatorI) SharesFromTokensTruncated(arg0 math.Int) (math.LegacyDec, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SharesFromTokensTruncated", arg0) - ret0, _ := ret[0].(math.LegacyDec) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SharesFromTokensTruncated indicates an expected call of SharesFromTokensTruncated. -func (mr *MockValidatorIMockRecorder) SharesFromTokensTruncated(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SharesFromTokensTruncated", reflect.TypeOf((*MockValidatorI)(nil).SharesFromTokensTruncated), arg0) -} - -// TmConsPublicKey mocks base method. -func (m *MockValidatorI) TmConsPublicKey() (crypto.PublicKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TmConsPublicKey") - ret0, _ := ret[0].(crypto.PublicKey) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// TmConsPublicKey indicates an expected call of TmConsPublicKey. 
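For context on the SharesFromTokens/TokensFromShares family mocked in this file: these follow the standard Cosmos SDK share accounting, tokens = shares * validatorTokens / totalDelegatorShares (and the inverse for shares). A tiny standalone illustration, independent of this patch:

package main

import (
	"fmt"

	"cosmossdk.io/math"
)

func main() {
	// A validator with 1000 bonded tokens represented by 100 delegator shares:
	validatorTokens := math.NewInt(1000)
	totalShares := math.LegacyNewDec(100)

	// 50 shares are therefore worth 50 * 1000 / 100 = 500 tokens.
	shares := math.LegacyNewDec(50)
	tokens := shares.MulInt(validatorTokens).Quo(totalShares)
	fmt.Println(tokens) // 500.000000000000000000
}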
-func (mr *MockValidatorIMockRecorder) TmConsPublicKey() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TmConsPublicKey", reflect.TypeOf((*MockValidatorI)(nil).TmConsPublicKey)) -} - -// TokensFromShares mocks base method. -func (m *MockValidatorI) TokensFromShares(arg0 math.LegacyDec) math.LegacyDec { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TokensFromShares", arg0) - ret0, _ := ret[0].(math.LegacyDec) - return ret0 -} - -// TokensFromShares indicates an expected call of TokensFromShares. -func (mr *MockValidatorIMockRecorder) TokensFromShares(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TokensFromShares", reflect.TypeOf((*MockValidatorI)(nil).TokensFromShares), arg0) -} - -// TokensFromSharesRoundUp mocks base method. -func (m *MockValidatorI) TokensFromSharesRoundUp(arg0 math.LegacyDec) math.LegacyDec { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TokensFromSharesRoundUp", arg0) - ret0, _ := ret[0].(math.LegacyDec) - return ret0 -} - -// TokensFromSharesRoundUp indicates an expected call of TokensFromSharesRoundUp. -func (mr *MockValidatorIMockRecorder) TokensFromSharesRoundUp(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TokensFromSharesRoundUp", reflect.TypeOf((*MockValidatorI)(nil).TokensFromSharesRoundUp), arg0) -} - -// TokensFromSharesTruncated mocks base method. -func (m *MockValidatorI) TokensFromSharesTruncated(arg0 math.LegacyDec) math.LegacyDec { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TokensFromSharesTruncated", arg0) - ret0, _ := ret[0].(math.LegacyDec) - return ret0 -} - -// TokensFromSharesTruncated indicates an expected call of TokensFromSharesTruncated. -func (mr *MockValidatorIMockRecorder) TokensFromSharesTruncated(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TokensFromSharesTruncated", reflect.TypeOf((*MockValidatorI)(nil).TokensFromSharesTruncated), arg0) -} diff --git a/x/oracle/keeper/common/types.go b/x/oracle/keeper/common/types.go deleted file mode 100644 index 993c20674..000000000 --- a/x/oracle/keeper/common/types.go +++ /dev/null @@ -1,98 +0,0 @@ -package common - -import ( - "math/big" - "sort" - - "github.com/ExocoreNetwork/exocore/x/oracle/types" -) - -var ( - // maxNonce indicates how many messages a validator can submit in a single round to offer price - // current we use this as a mock distance - MaxNonce int32 = 3 - - // these two threshold value used to set the threshold to tell when the price had come to consensus and was able to get a final price of that round - ThresholdA int32 = 2 - ThresholdB int32 = 3 - - // maxDetId each validator can submit, so the calculator can cache maximum of maxDetId*count(validators) values, this is for resistance of malicious validator submmiting invalid detId - MaxDetID int32 = 5 - - // for each token at most MaxSizePrices round of prices will be keep in store - MaxSizePrices = 100 - - // consensus mode: v1: as soon as possbile - Mode types.ConsensusMode = types.ConsensusModeASAP -) - -type Set[T comparable] struct { - size int - slice []T -} - -func (s *Set[T]) Copy() *Set[T] { - ret := NewSet[T](s.Length()) - ret.slice = append(ret.slice, s.slice...) 
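The ThresholdA/ThresholdB pair declared above feeds the ExceedsThreshold helper that follows: power counts as sufficient only when power*ThresholdB > totalPower*ThresholdA, i.e. strictly more than two thirds of the total. A small standalone worked example (not part of the patch):

package main

import (
	"fmt"
	"math/big"
)

// exceedsTwoThirds reproduces the ExceedsThreshold check with ThresholdA=2 and
// ThresholdB=3. The comparison is strict, so exactly 2/3 is not enough.
func exceedsTwoThirds(power, total *big.Int) bool {
	lhs := new(big.Int).Mul(power, big.NewInt(3)) // power * ThresholdB
	rhs := new(big.Int).Mul(total, big.NewInt(2)) // totalPower * ThresholdA
	return lhs.Cmp(rhs) > 0
}

func main() {
	fmt.Println(exceedsTwoThirds(big.NewInt(67), big.NewInt(100))) // true:  201 > 200
	fmt.Println(exceedsTwoThirds(big.NewInt(66), big.NewInt(100))) // false: 198 > 200 fails
	fmt.Println(exceedsTwoThirds(big.NewInt(66), big.NewInt(99)))  // false: exactly two thirds is rejected
}

Judging by the test comments further below ("exceeds 2/3 on detID=2"), the threshold type consumed by the new feedermanagement aggregator (t.Exceeds) appears to encode the same two-thirds rule.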
- return ret -} - -func (s *Set[T]) Add(value T) bool { - if len(s.slice) == s.size { - return false - } - for _, v := range s.slice { - if v == value { - return false - } - } - s.slice = append(s.slice, value) - return true -} - -func (s *Set[T]) Has(value T) bool { - for _, v := range s.slice { - if v == value { - return true - } - } - return false -} - -func (s *Set[T]) Length() int { - return s.size -} - -func NewSet[T comparable](length int) *Set[T] { - return &Set[T]{ - size: length, - slice: make([]T, 0, length), - } -} - -func ExceedsThreshold(power *big.Int, totalPower *big.Int) bool { - return new(big.Int).Mul(power, big.NewInt(int64(ThresholdB))).Cmp(new(big.Int).Mul(totalPower, big.NewInt(int64(ThresholdA)))) > 0 -} - -type BigIntList []*big.Int - -func (b BigIntList) Len() int { - return len(b) -} - -func (b BigIntList) Less(i, j int) bool { - return b[i].Cmp(b[j]) < 0 -} - -func (b BigIntList) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} - -func (b BigIntList) Median() *big.Int { - sort.Sort(b) - l := len(b) - if l%2 == 1 { - return b[l/2] - } - return new(big.Int).Div(new(big.Int).Add(b[l/2], b[l/2-1]), big.NewInt(2)) -} diff --git a/x/oracle/keeper/feedermanagement/aggregator.go b/x/oracle/keeper/feedermanagement/aggregator.go new file mode 100644 index 000000000..dd38d45db --- /dev/null +++ b/x/oracle/keeper/feedermanagement/aggregator.go @@ -0,0 +1,491 @@ +package feedermanagement + +import ( + "fmt" + "math/big" + "reflect" + "slices" + + "golang.org/x/exp/maps" +) + +type sourceChecker interface { + IsDeterministic(sourceID int64) (bool, error) +} + +func newAggregator(t *threshold, algo AggAlgorithm) *aggregator { + return &aggregator{ + t: t, + finalPrice: nil, + v: newRecordsValidators(), + ds: newRecordsDSs(t), + algo: algo, + } +} + +func (a *aggregator) Equals(a2 *aggregator) bool { + if a == nil || a2 == nil { + return a == a2 + } + + if !reflect.DeepEqual(a.finalPrice, a2.finalPrice) { + return false + } + + if !a.t.Equals(a2.t) { + return false + } + + if !a.v.Equals(a2.v) { + return false + } + + if !a.ds.Equals(a2.ds) { + return false + } + + if !a.algo.Equals(a2.algo) { + return false + } + + return true +} + +func (a *aggregator) CopyForCheckTx() *aggregator { + if a == nil { + return nil + } + var finalPrice *PriceResult + if a.finalPrice != nil { + tmp := *a.finalPrice + finalPrice = &tmp + } + return &aggregator{ + t: a.t.Cpy(), + finalPrice: finalPrice, + v: a.v.Cpy(), + ds: a.ds.Cpy(), + algo: a.algo, + } +} + +func (a *aggregator) GetFinalPrice() (*PriceResult, bool) { + if a.finalPrice != nil { + return a.finalPrice, true + } + if !a.exceedsThreshold() { + return nil, false + } + finalPrice, ok := a.v.GetFinalPrice(a.algo) + if ok { + a.finalPrice = finalPrice + } + return finalPrice, ok +} + +func (a *aggregator) RecordMsg(msg *MsgItem) error { + _, err := a.v.RecordMsg(msg) + return err +} + +// AddMsg records the message in a.v and do aggregation in a.ds +func (a *aggregator) AddMsg(msg *MsgItem) error { + // record into recordsValidators, validation for duplication + addedMsg, err := a.v.RecordMsg(msg) + // all prices failed to be recorded + if err != nil { + return fmt.Errorf("failed to add quote, error:%w", err) + } + // add into recordsDSs for DS aggregation + for _, ps := range addedMsg.PriceSources { + if ps.deterministic { + if a.ds.AddPriceSource(ps, msg.Power, msg.Validator) { + finalPrice, ok := a.ds.GetFinalPriceForSourceID(ps.sourceID) + if ok { + a.v.UpdateFinalPriceForDS(ps.sourceID, finalPrice) + } + } + } + } + return nil +} + +// 
TODO: V2: the accumulatedPower should corresponding to all valid validators which provides all sources required by rules(defined in oracle.Params) +func (a *aggregator) exceedsThreshold() bool { + return a.t.Exceeds(a.v.accumulatedPower) +} + +func newRecordsValidators() *recordsValidators { + return &recordsValidators{ + finalPrice: nil, + accumulatedPower: big.NewInt(0), + records: make(map[string]*priceValidator), + } +} + +func (rv *recordsValidators) Equals(rv2 *recordsValidators) bool { + if rv == nil || rv2 == nil { + return rv == rv2 + } + + if !reflect.DeepEqual(rv.finalPrice, rv2.finalPrice) { + return false + } + if rv.accumulatedPower.Cmp(rv2.accumulatedPower) != 0 { + return false + } + if !reflect.DeepEqual(rv.finalPrices, rv2.finalPrices) { + return false + } + if len(rv.records) != len(rv2.records) { + return false + } + // safe to range map, map compare + for k, v := range rv.records { + if v2, ok := rv2.records[k]; !ok || !v.Equals(v2) { + return false + } + } + + return true +} + +func (rv *recordsValidators) Cpy() *recordsValidators { + if rv == nil { + return nil + } + var finalPrice *PriceResult + if rv.finalPrice != nil { + tmp := *rv.finalPrice + finalPrice = &tmp + } + var finalPrices map[string]*PriceResult + if len(rv.finalPrices) > 0 { + finalPrices = make(map[string]*PriceResult) + // safe to range map, map copy + for v, p := range rv.finalPrices { + price := *p + finalPrices[v] = &price + } + } + records := make(map[string]*priceValidator) + // safe to range map, map copy + for v, pv := range rv.records { + records[v] = pv.Cpy() + } + return &recordsValidators{ + finalPrice: finalPrice, + finalPrices: finalPrices, + accumulatedPower: new(big.Int).Set(rv.accumulatedPower), + records: records, + } +} + +func (rv *recordsValidators) RecordMsg(msg *MsgItem) (*MsgItem, error) { + record, ok := rv.records[msg.Validator] + if !ok { + record = newPriceValidator(msg.Validator, msg.Power) + } + rets := &MsgItem{ + FeederID: msg.FeederID, + Validator: msg.Validator, + Power: msg.Power, + PriceSources: make([]*priceSource, 0), + } + + updated, added, err := record.TryAddPriceSources(msg.PriceSources) + if err != nil { + return nil, fmt.Errorf("failed to record msg, error:%w", err) + } + record.ApplyAddedPriceSources(updated) + if !ok { + rv.records[msg.Validator] = record + rv.accumulatedPower = new(big.Int).Add(rv.accumulatedPower, msg.Power) + } + rets.PriceSources = added + return rets, nil +} + +func (rv *recordsValidators) GetValidatorQuotePricesForSourceID(validator string, sourceID int64) ([]*PriceInfo, bool) { + record, ok := rv.records[validator] + if !ok { + return nil, false + } + pSource, ok := record.priceSources[sourceID] + if !ok { + return nil, false + } + return pSource.prices, true +} + +func (rv *recordsValidators) GetFinalPrice(algo AggAlgorithm) (*PriceResult, bool) { + if rv.finalPrice != nil { + return rv.finalPrice, true + } + if prices, ok := rv.GetFinalPriceForValidators(algo); ok { + keySlice := make([]string, 0, len(prices)) + // safe to range map, this is used to generate a sorted keySlice + for validator := range prices { + keySlice = append(keySlice, validator) + } + slices.Sort(keySlice) + algo.Reset() + // keys are sorted to make sure algo.Add is called in the same order for deterministic result + for _, validator := range keySlice { + if !algo.Add(prices[validator]) { + algo.Reset() + return nil, false + } + } + rv.finalPrice = algo.GetResult() + return rv.finalPrice, rv.finalPrice != nil + } + return nil, false +} + +func (rv 
*recordsValidators) GetFinalPriceForValidators(algo AggAlgorithm) (map[string]*PriceResult, bool) { + if len(rv.finalPrices) > 0 { + return rv.finalPrices, true + } + ret := make(map[string]*PriceResult) + // the order here is not important, so it's safe to range map here + // we only return true when all validators have finalPrice + for validator, pv := range rv.records { + finalPrice, ok := pv.GetFinalPrice(algo) + if !ok { + return nil, false + } + ret[validator] = finalPrice + } + rv.finalPrices = ret + return ret, true +} + +func (rv *recordsValidators) UpdateFinalPriceForDS(sourceID int64, finalPrice *PriceResult) bool { + if finalPrice == nil { + return false + } + // it's safe to range map here, order does not matter + for _, record := range rv.records { + // ignore the fail cases for updating some pv' DS finalPrice + record.UpdateFinalPriceForDS(sourceID, finalPrice) + } + return true +} + +func newRecordsDSs(t *threshold) *recordsDSs { + return &recordsDSs{ + t: t, + dsMap: make(map[int64]*recordsDS), + } +} + +func (rdss *recordsDSs) Equals(rdss2 *recordsDSs) bool { + if rdss == nil || rdss2 == nil { + return rdss == rdss2 + } + + if !rdss.t.Equals(rdss2.t) { + return false + } + if len(rdss.dsMap) != len(rdss2.dsMap) { + return false + } + // safe to range map, map compare + for k, v := range rdss.dsMap { + if v2, ok := rdss2.dsMap[k]; !ok || !v.Equals(v2) { + return false + } + } + + return true +} + +func (rdss *recordsDSs) Cpy() *recordsDSs { + if rdss == nil { + return nil + } + dsMap := make(map[int64]*recordsDS) + // safe to range map, map copy + for id, r := range rdss.dsMap { + dsMap[id] = r.Cpy() + } + return &recordsDSs{ + t: rdss.t.Cpy(), + dsMap: dsMap, + } +} + +// AddPriceSource adds prices for DS sources +func (rdss *recordsDSs) AddPriceSource(ps *priceSource, power *big.Int, validator string) bool { + if !ps.deterministic { + return false + } + price, ok := rdss.dsMap[ps.sourceID] + if !ok { + price = newRecordsDS() + rdss.dsMap[ps.sourceID] = price + } + for _, p := range ps.prices { + price.AddPrice(&PricePower{ + Price: p, + Power: power, + Validators: map[string]struct{}{validator: {}}, + }) + } + return true +} + +func (rdss *recordsDSs) GetFinalPriceForSourceID(sourceID int64) (*PriceResult, bool) { + rds, ok := rdss.dsMap[sourceID] + if !ok { + return nil, false + } + return rds.GetFinalPrice(rdss.t) +} + +func (rdss *recordsDSs) GetFinalPriceForSources() (map[int64]*PriceResult, bool) { + ret := make(map[int64]*PriceResult) + // safe to range map, the result is a map of 'all or none' + for sourceID, rds := range rdss.dsMap { + if finalPrice, ok := rds.GetFinalPrice(rdss.t); ok { + ret[sourceID] = finalPrice + } else { + return nil, false + } + } + return ret, true +} + +func (rdss *recordsDSs) GetFinalDetIDForSourceID(sourceID int64) string { + if rds, ok := rdss.dsMap[sourceID]; ok { + if rds.finalPrice != nil { + return rds.finalDetID + } + if _, ok := rds.GetFinalPrice(rdss.t); ok { + return rds.finalDetID + } + } + return "" +} + +func newRecordsDS() *recordsDS { + return &recordsDS{ + finalPrice: nil, + validators: make(map[string]struct{}), + finalDetID: "", + accumulatedPowers: big.NewInt(0), + records: make([]*PricePower, 0), + } +} + +func (rds *recordsDS) Equals(rds2 *recordsDS) bool { + if rds == nil || rds2 == nil { + return rds == rds2 + } + + if !reflect.DeepEqual(rds.finalPrice, rds2.finalPrice) { + return false + } + if rds.finalDetID != rds2.finalDetID { + return false + } + if rds.accumulatedPowers.Cmp(rds2.accumulatedPowers) != 0 { + 
return false + } + if !reflect.DeepEqual(rds.validators, rds2.validators) { + return false + } + if len(rds.records) != len(rds2.records) { + return false + } + for i, r := range rds.records { + if !r.Equals(rds2.records[i]) { + return false + } + } + + return true +} + +func (rds *recordsDS) Cpy() *recordsDS { + if rds == nil { + return nil + } + var finalPrice *PriceResult + if rds.finalPrice != nil { + tmp := *rds.finalPrice + finalPrice = &tmp + } + validators := make(map[string]struct{}) + // safe to range map, map copy + for v := range rds.validators { + validators[v] = struct{}{} + } + records := make([]*PricePower, 0, len(rds.records)) + for _, r := range rds.records { + records = append(records, r.Cpy()) + } + return &recordsDS{ + finalPrice: finalPrice, + finalDetID: rds.finalDetID, + accumulatedPowers: new(big.Int).Set(rds.accumulatedPowers), + validators: validators, + records: records, + } +} + +func (rds *recordsDS) GetFinalPrice(t *threshold) (*PriceResult, bool) { + if rds.finalPrice != nil { + return rds.finalPrice, true + } + if t.Exceeds(rds.accumulatedPowers) { + l := len(rds.records) + for i := l - 1; i >= 0; i-- { + pPower := rds.records[i] + if t.Exceeds(pPower.Power) { + rds.finalPrice = pPower.Price.PriceResult() + rds.finalDetID = pPower.Price.DetID + return rds.finalPrice, true + } + } + } + return nil, false +} + +// AddPrice adds a price into recordsDS +// NOTE: the input PricePower should be filtered by recordsValidators before calling this function to make sure the price is not duplicated by detID +func (rds *recordsDS) AddPrice(p *PricePower) { + validator := maps.Keys(p.Validators)[0] + i := 0 + l := len(rds.records) + for ; i < l; i++ { + record := rds.records[i] + if record.Price.EqualDS(p.Price) { + if _, ok := record.Validators[validator]; !ok { + record.Power.Add(record.Power, p.Power) + record.Validators[validator] = struct{}{} + } + break + } + } + if i >= l { + p = p.Cpy() + for i = 0; i < l; i++ { + record := rds.records[i] + if p.Price.DetID <= record.Price.DetID { + // insert before i + combined := append([]*PricePower{p}, rds.records[i:]...) + rds.records = append(rds.records[:i], combined...) + break + } + } + if i >= l { + rds.records = append(rds.records, p) + } + } + if _, ok := rds.validators[validator]; !ok { + rds.accumulatedPowers.Add(rds.accumulatedPowers, p.Power) + rds.validators[validator] = struct{}{} + } +} diff --git a/x/oracle/keeper/feedermanagement/aggregator_test.go b/x/oracle/keeper/feedermanagement/aggregator_test.go new file mode 100644 index 000000000..4d24dcc9b --- /dev/null +++ b/x/oracle/keeper/feedermanagement/aggregator_test.go @@ -0,0 +1,336 @@ +package feedermanagement + +import ( + "testing" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + . 
"github.com/smartystreets/goconvey/convey" + gomock "go.uber.org/mock/gomock" +) + +func TestAggregation(t *testing.T) { + Convey("aggregation", t, func() { + Convey("add priceSouce in priceSource", func() { + ps := newPriceSource(1, true) + Convey("add first priceSource, success", func() { + psAdded, err := ps.Add(ps1) + So(psAdded, ShouldResemble, ps1) + So(err, ShouldBeNil) + _, ok := ps.detIDs["1"] + So(ok, ShouldBeTrue) + Convey("add different sourceID, reject", func() { + psAdded, err := ps.Add(ps3) + So(psAdded, ShouldBeNil) + So(err, ShouldNotBeNil) + }) + Convey("add same sourceID with same DetID, reject", func() { + psAdded, err := ps.Add(ps1) + So(psAdded, ShouldBeNil) + So(err, ShouldNotBeNil) + }) + Convey("add same sourceID with different DetID, success", func() { + psAdded, err := ps.Add(ps2) + So(psAdded, ShouldResemble, ps2) + So(err, ShouldBeNil) + _, ok := ps.detIDs["2"] + So(ok, ShouldBeTrue) + }) + Convey("add same sourceID with different DetID, duplicated input, return the added one value", func() { + psAdded, err := ps.Add(ps4) + So(psAdded, ShouldResemble, ps2) + So(err, ShouldBeNil) + }) + }) + }) + Convey("add priceSource in priceValidator", func() { + // Try + pv := newPriceValidator("validator1", big1) + Convey("add source1 with 2 detIDs, try:success", func() { + // duplicated detID=1 in ps1_2 will be removed in returned 'added' + updated, added, err := pv.TryAddPriceSources([]*priceSource{ps1_2, ps2}) + So(updated, ShouldResemble, map[int64]*priceSource{1: ps5}) + So(added, ShouldResemble, []*priceSource{ps1, ps2}) + So(err, ShouldBeNil) + // 'try' will not actually update pv + So(pv.priceSources, ShouldHaveLength, 0) + Convey("apply changes, success", func() { + pv.ApplyAddedPriceSources(updated) + So(pv.priceSources, ShouldHaveLength, 1) + So(pv.priceSources, ShouldResemble, map[int64]*priceSource{1: ps5}) + Convey("add source1 with detID 3, try:success", func() { + updated, added, err := pv.TryAddPriceSources([]*priceSource{ps3_2}) + So(updated, ShouldResemble, map[int64]*priceSource{1: ps6}) + So(added, ShouldResemble, []*priceSource{ps3_2}) + So(err, ShouldBeNil) + So(pv.priceSources[1].prices, ShouldHaveLength, 2) + Convey("apply changes, success", func() { + pv.ApplyAddedPriceSources(updated) + So(pv.priceSources[1].prices, ShouldHaveLength, 3) + }) + }) + }) + }) + }) + Convey("record msgs in recordsValidators", func() { + rv := newRecordsValidators() + // TODO: multiple sources(for V2) + Convey("record valid msg, success", func() { + msgAdded, err := rv.RecordMsg(msgItem1) + So(msgAdded, ShouldResemble, msgItem1_2) + So(err, ShouldBeNil) + So(rv.records["validator1"], ShouldResemble, &priceValidator{validator: "validator1", power: big1, priceSources: map[int64]*priceSource{1: ps5}}) + So(rv.accumulatedPower, ShouldResemble, big1) + Convey("record duplicated msg, reject", func() { + msgAdded, err := rv.RecordMsg(msgItem1_3) + So(msgAdded, ShouldBeNil) + So(err, ShouldNotBeNil) + }) + Convey("record msg from another validator, success", func() { + msgAdded, err := rv.RecordMsg(msgItem2) + So(msgAdded, ShouldResemble, msgItem2_2) + So(err, ShouldBeNil) + So(rv.records["validator2"], ShouldResemble, &priceValidator{validator: "validator2", power: big1, priceSources: map[int64]*priceSource{1: ps5}}) + So(rv.accumulatedPower, ShouldResemble, big2) + Convey("calculate final price without confirmed ds price, fail", func() { + finalPrice, err := rv.GetFinalPrice(defaultAggMedian) + So(finalPrice, ShouldBeNil) + So(err, ShouldBeFalse) + }) + Convey("calculate 
final price with confirmed ds price, success", func() { + Convey("update final price of ds, success", func() { + So(rv.records["validator1"].priceSources[1].finalPrice, ShouldBeNil) + rv.UpdateFinalPriceForDS(1, pr1) + So(rv.records["validator1"].priceSources[1].finalPrice, ShouldResemble, pr1) + So(rv.records["validator2"].priceSources[1].finalPrice, ShouldResemble, pr1) + finalPrice, err := rv.GetFinalPrice(defaultAggMedian) + So(finalPrice, ShouldResemble, pr1_2) + So(err, ShouldBeTrue) + }) + }) + }) + }) + }) + Convey("add msgs in recordsDS", func() { + rds := newRecordsDS() + + Convey("add first msg with v1-power-1 for detID2, success", func() { + rds.AddPrice(pw2) + So(rds.accumulatedPowers, ShouldResemble, big1) + So(rds.validators["validator1"], ShouldNotBeNil) + So(rds.records, ShouldHaveLength, 1) + So(rds.records[0], ShouldResemble, pw2) + Convey("add second msg with v1-power- 1 for detID1", func() { + rds.AddPrice(pw1) + So(rds.accumulatedPowers, ShouldResemble, big1) + So(rds.records, ShouldHaveLength, 2) + So(rds.records[0], ShouldResemble, pw1) + So(rds.records[1], ShouldResemble, pw2) + Convey("add 3rd msg with v2-power-1 for detID2", func() { + rds.AddPrice(pw3) + So(rds.accumulatedPowers, ShouldResemble, big2) + So(rds.validators["validator2"], ShouldNotBeNil) + So(rds.records, ShouldHaveLength, 2) + So(rds.records[0], ShouldResemble, pw1) + So(rds.records[1], ShouldResemble, pw2_2) + finalPrice, ok := rds.GetFinalPrice(th) + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + Convey("add 4th msg with v3-power-1 for detID2", func() { + rds.AddPrice(pw4) + So(rds.accumulatedPowers, ShouldResemble, big3) + So(rds.validators["validator3"], ShouldNotBeNil) + So(rds.records, ShouldHaveLength, 2) + So(rds.records[1], ShouldResemble, pw3_2) + Convey("get finalPrice, success", func() { + finalPrice, ok = rds.GetFinalPrice(th) + So(finalPrice, ShouldResemble, &PriceResult{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }) + So(ok, ShouldBeTrue) + Convey("add 5th msg with v4-power-1 for detID2", func() { + rds.AddPrice(pw5) + So(rds.accumulatedPowers, ShouldResemble, big4) + So(rds.validators["validator4"], ShouldNotBeNil) + So(rds.records, ShouldHaveLength, 2) + finalPrice, ok = rds.GetFinalPrice(th) + So(finalPrice, ShouldResemble, &PriceResult{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }) + }) + }) + }) + }) + }) + }) + }) + Convey("add msgs in recordsDSs", func() { + rdss := newRecordsDSs(th) + Convey("add 3 same detId=1 prices from v1,v2,v3", func() { + rdss.AddPriceSource(ps1, big1, "validator1") + rdss.AddPriceSource(ps1, big1, "validator2") + finalPrice, ok := rdss.GetFinalPriceForSourceID(1) + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + rdss.AddPriceSource(ps1, big1, "validator3") + finalPrice, ok = rdss.GetFinalPriceForSourceID(1) + So(finalPrice, ShouldNotBeNil) + So(finalPrice, ShouldResemble, ps1.prices[0].PriceResult()) + So(ok, ShouldBeTrue) + }) + Convey("add 3 same detId=1 prices and 2 same detID=2 prices from v1,v2,v3", func() { + rdss.AddPriceSource(ps1, big1, "validator1") + rdss.AddPriceSource(ps2, big1, "validator2") + finalPrice, ok := rdss.GetFinalPriceForSourceID(1) + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + rdss.AddPriceSource(ps1_3, big1, "validator3") + finalPrice, ok = rdss.GetFinalPriceForSourceID(1) + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + rdss.AddPriceSource(ps2, big1, "validator4") + finalPrice, ok = rdss.GetFinalPriceForSourceID(1) + So(finalPrice, 
ShouldResemble, ps2.prices[0].PriceResult()) + So(ok, ShouldBeTrue) + }) + + }) + Convey("add msgs in aggregator", func() { + a := newAggregator(th, defaultAggMedian) + err := a.AddMsg(msgItem1) + So(err, ShouldBeNil) + finalPrice, ok := a.GetFinalPrice() + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + + err = a.AddMsg(msgItem2) + So(err, ShouldBeNil) + finalPrice, ok = a.GetFinalPrice() + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + + // failed to add duplicated msg + err = a.AddMsg(msgItem2) + So(err, ShouldNotBeNil) + + // powe exceeds 2/3 on detID=2 + err = a.AddMsg(msgItem3) + So(err, ShouldBeNil) + finalPrice, ok = a.GetFinalPrice() + So(finalPrice, ShouldResemble, &PriceResult{Price: "999", Decimal: 8}) + So(ok, ShouldBeTrue) + So(a.ds.GetFinalDetIDForSourceID(1), ShouldEqual, "2") + }) + Convey("tally in round", func() { + ctrl := gomock.NewController(t) + c := NewMockCacheReader(ctrl) + c.EXPECT(). + GetPowerForValidator(gomock.Any()). + Return(big1, true). + AnyTimes() + c.EXPECT(). + IsDeterministic(gomock.Eq(int64(1))). + Return(true, nil). + AnyTimes() + c.EXPECT(). + GetThreshold(). + Return(th). + AnyTimes() + c.EXPECT(). + IsRuleV1(gomock.Any()). + Return(true). + AnyTimes() + + r := tData.NewRound(c) + r.cache = c + feederID := r.feederID + Convey("add msg in closed quoting window", func() { + pmsg1 := protoMsgItem1 + pmsg1.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err := r.Tally(pmsg1) + // quoting window not open + So(err, ShouldNotBeNil) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldBeNil) + }) + Convey("open quotingWindow", func() { + r.PrepareForNextBlock(int64(params.TokenFeeders[r.feederID].StartBaseBlock)) + So(r.status, ShouldEqual, roundStatusOpen) + Convey("add msg-v1-detID1 for source1", func() { + pmsg1 := protoMsgItem1 + pmsg1.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err := r.Tally(pmsg1) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldResemble, pmsg1) + So(err, ShouldBeNil) + Convey("add msg-v1-detID2, success ", func() { + pmsg2 := protoMsgItem2 + pmsg2.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg2) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldResemble, pmsg2) + So(err, ShouldBeNil) + Convey("add msg-v2-detID2, success", func() { + // v2,detID=2 + pmsg3 := protoMsgItem3 + pmsg3.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg3) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldResemble, pmsg3) + So(err, ShouldBeNil) + Convey("two cases:", func() { + Convey("add msg-v3-detID2, finalPrice", func() { + // v3,detID=2 + pmsg4 := protoMsgItem4 + pmsg4.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg4) + So(finalPrice, ShouldResemble, &PriceResult{ + Price: "999", + Decimal: 8, + DetID: "2", + }) + So(addedMsgItem, ShouldResemble, pmsg4) + So(err, ShouldBeNil) + Convey("add msg-v4-detID2, recordOnly", func() { + pmsg5 := protoMsgItem5 + pmsg5.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg5) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldResemble, pmsg5) + So(err, ShouldBeError, oracletypes.ErrQuoteRecorded) + }) + }) + Convey("add msg-v3-detID2-different-price, success", func() { + pmsg4 := protoMsgItem4_2 + pmsg4.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg4) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldResemble, pmsg4) + So(err, ShouldBeNil) + Convey("add msg-v4-detID2, success", func() { + pmsg5 := protoMsgItem5 + pmsg5.FeederID = 
uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg5) + So(finalPrice, ShouldResemble, &PriceResult{ + Price: "999", + Decimal: 8, + DetID: "2", + }) + So(addedMsgItem, ShouldResemble, pmsg5) + So(err, ShouldBeNil) + }) + + }) + }) + }) + }) + }) + }) + }) + }) +} diff --git a/x/oracle/keeper/feedermanagement/algo.go b/x/oracle/keeper/feedermanagement/algo.go new file mode 100644 index 000000000..7f18f7d8c --- /dev/null +++ b/x/oracle/keeper/feedermanagement/algo.go @@ -0,0 +1,154 @@ +package feedermanagement + +import ( + "math/big" + "sort" + "strings" +) + +type AlgoType string + +const ( + AlgoMedian AlgoType = "median" +) + +type BigIntList []*big.Int + +func (b BigIntList) Len() int { + return len(b) +} + +func (b BigIntList) Less(i, j int) bool { + return b[i].Cmp(b[j]) < 0 +} + +func (b BigIntList) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b BigIntList) Median() *big.Int { + if len(b) == 0 { + return nil + } + sort.Sort(b) + l := len(b) + if l%2 == 1 { + return b[l/2] + } + return new(big.Int).Div(new(big.Int).Add(b[l/2], b[l/2-1]), big.NewInt(2)) +} + +type AggAlgorithm interface { + Add(*PriceResult) bool + GetResult() *PriceResult + Reset() + Type() AlgoType + Equals(AggAlgorithm) bool +} + +type priceType int + +const ( + notSet priceType = iota + number + notNumber +) + +var _ AggAlgorithm = &AggMedian{} + +type AggMedian struct { + t priceType + finalString string + list []*big.Int + decimal int32 +} + +func NewAggMedian() *AggMedian { + return &AggMedian{ + list: make([]*big.Int, 0), + } +} + +func (a *AggMedian) Add(price *PriceResult) bool { + priceInt, ok := new(big.Int).SetString(price.Price, 10) + if ok { + if a.t == notNumber { + return false + } + if a.t == notSet { + a.t = number + a.list = append(a.list, priceInt) + a.decimal = price.Decimal + return true + } + if a.decimal != price.Decimal { + if a.decimal > price.Decimal { + price.Price += strings.Repeat("0", int(a.decimal-price.Decimal)) + priceInt, _ = new(big.Int).SetString(price.Price, 10) + } else { + delta := big.NewInt(int64(price.Decimal - a.decimal)) + for _, v := range a.list { + nv := new(big.Int).Mul(v, new(big.Int).Exp(big.NewInt(10), delta, nil)) + *v = *nv + } + a.decimal = price.Decimal + } + } + a.list = append(a.list, priceInt) + return true + } + // input is a string, not a number + if a.t == number { + return false + } + if a.t == notSet { + a.t = notNumber + a.finalString = price.Price + return true + } + if a.finalString != price.Price { + return false + } + return true +} + +func (a *AggMedian) GetResult() *PriceResult { + defer a.Reset() + if a.t == notSet { + return nil + } + if a.t == number { + // when a.t is set to number, the length of a.list must be bigger than 0, so the Median() must return a non-nil result + result := BigIntList(a.list).Median().String() + decimal := a.decimal + return &PriceResult{ + Price: result, + Decimal: decimal, + } + } + if len(a.finalString) == 0 { + return nil + } + result := a.finalString + return &PriceResult{ + Price: result, + } +} + +func (a *AggMedian) Reset() { + a.list = make([]*big.Int, 0) + a.t = notSet + a.decimal = 0 + a.finalString = "" +} + +func (a *AggMedian) Type() AlgoType { + return AlgoMedian +} + +func (a *AggMedian) Equals(a2 AggAlgorithm) bool { + return a.Type() == a2.Type() +} + +//nolint:unused +var defaultAggMedian = NewAggMedian() diff --git a/x/oracle/keeper/feedermanagement/caches.go b/x/oracle/keeper/feedermanagement/caches.go new file mode 100644 index 000000000..f3e116037 --- /dev/null +++ 
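// Editor's illustrative sketch (not part of the patch): AggMedian in algo.go above
// first aligns prices that arrive with different decimals and only then takes the
// median of the scaled integers. The standalone helper below mirrors that
// alignment+median behaviour; the name alignedMedian and the package main wrapper
// are hypothetical and exist only for this example.
package main

import (
	"fmt"
	"math/big"
	"sort"
	"strings"
)

// alignedMedian scales every price string to the largest decimal seen, then
// returns the median of the scaled integers together with that decimal.
func alignedMedian(prices []string, decimals []int32) (*big.Int, int32) {
	maxDec := int32(0)
	for _, d := range decimals {
		if d > maxDec {
			maxDec = d
		}
	}
	scaled := make([]*big.Int, 0, len(prices))
	for i, p := range prices {
		// pad with zeros so every value uses maxDec decimals
		v, ok := new(big.Int).SetString(p+strings.Repeat("0", int(maxDec-decimals[i])), 10)
		if !ok {
			continue // skip non-numeric input; algo.go tracks string prices separately
		}
		scaled = append(scaled, v)
	}
	sort.Slice(scaled, func(i, j int) bool { return scaled[i].Cmp(scaled[j]) < 0 })
	l := len(scaled)
	if l == 0 {
		return nil, 0
	}
	if l%2 == 1 {
		return scaled[l/2], maxDec
	}
	return new(big.Int).Div(new(big.Int).Add(scaled[l/2], scaled[l/2-1]), big.NewInt(2)), maxDec
}

func main() {
	// "999" with 8 decimals and "99900" with 10 decimals represent the same value
	m, d := alignedMedian([]string{"999", "99900", "1001"}, []int32{8, 10, 8})
	fmt.Println(m, d) // 99900 10
}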
b/x/oracle/keeper/feedermanagement/caches.go @@ -0,0 +1,384 @@ +package feedermanagement + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "slices" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/ethereum/go-ethereum/common" +) + +type ItemV map[string]*big.Int + +const v1RuleID = 1 + +func (c *caches) CpyForSimulation() *caches { + ret := *c + msg := *(c.msg) + params := *(c.params) + // it's safe to do shallow copy on msg, params + ret.msg = &msg + ret.params = ¶ms + validators := make(map[string]*big.Int) + // safe to range map, map copy + for v, p := range c.validators.validators { + validators[v] = new(big.Int).Set(p) + } + ret.validators = &cacheValidator{ + validators: validators, + update: c.validators.update, + totalPower: new(big.Int).Set(c.validators.totalPower), + } + + return &ret +} + +func (c *caches) Equals(c2 *caches) bool { + if c == nil || c2 == nil { + return c == c2 + } + if !c.msg.Equals(c2.msg) { + return false + } + if !c.validators.Equals(c2.validators) { + return false + } + if !c.params.Equals(c2.params) { + return false + } + return true +} + +func (c *caches) Init(k Submitter, params *oracletypes.Params, validators map[string]*big.Int) { + c.ResetCaches() + c.k = k + + c.params.add(params) + + c.validators.add(validators) +} + +func (c *caches) GetDecimalFromFeederID(feederID uint64) (int32, error) { + p := c.params.params + if feederID <= 0 || feederID > uint64(len(p.TokenFeeders)) { + return 0, errors.New("feederID not exists") + } + tf := p.TokenFeeders[feederID] + return p.Tokens[tf.TokenID].Decimal, nil +} + +func (c *caches) GetMaxNonce() int32 { + return c.params.params.GetMaxNonce() +} + +func (c *caches) GetMaxSizePrices() int32 { + return c.params.params.GetMaxSizePrices() +} + +func (c *caches) IsDeterministic(sourceID int64) (bool, error) { + sources := c.params.params.Sources + if sourceID >= int64(len(sources)) || sourceID <= 0 { + return false, errors.New("invalid sourceID") + } + return sources[sourceID].Deterministic, nil +} + +// RuleV1, we restrict the source to be Chainlink and only that source is acceptable +func (c *caches) IsRuleV1(feederID int64) bool { + ruleID := c.params.params.TokenFeeders[feederID].RuleID + return ruleID == v1RuleID && len(c.params.params.Sources) == 2 && c.params.params.Sources[1].Name == oracletypes.SourceChainlinkName +} + +func (c *caches) GetTokenIDForFeederID(feederID int64) (int64, bool) { + tf, ok := c.GetTokenFeederForFeederID(feederID) + if !ok { + return 0, false + } + // #nosec G115 // tokenID is index of slice + return int64(tf.TokenID), true +} + +// GetValidators return current validator set as ordered slice +func (c *caches) GetValidators() []string { + return c.validators.slice() +} + +func (cm *cacheMsgs) Equals(cm2 *cacheMsgs) bool { + if cm == nil || cm2 == nil { + return cm == cm2 + } + for idx, v := range *cm { + v2 := (*cm2)[idx] + if !reflect.DeepEqual(v, v2) { + return false + } + } + return true +} + +func (cm *cacheMsgs) Cpy() *cacheMsgs { + ret := make([]*oracletypes.MsgItem, 0, len(*cm)) + for _, msg := range *cm { + msgCpy := *msg + ret = append(ret, &msgCpy) + } + cmNew := cacheMsgs(ret) + return &cmNew +} + +func (cm *cacheMsgs) add(item *oracletypes.MsgItem) { + *cm = append(*cm, item) +} + +func (cm *cacheMsgs) commit(ctx sdk.Context, k Submitter) { + if len(*cm) == 0 { + return + } + recentMsgs := oracletypes.RecentMsg{ + // #nosec G115 // height is not negative + Block: uint64(ctx.BlockHeight()), + 
Msgs: *cm, + } + + k.SetMsgItemsForCache(ctx, recentMsgs) + + *cm = make([]*oracletypes.MsgItem, 0) +} + +func (cv *cacheValidator) Equals(cv2 *cacheValidator) bool { + if cv == nil || cv2 == nil { + return cv == cv2 + } + if cv.update != cv2.update { + return false + } + if len(cv.validators) != len(cv2.validators) { + return false + } + if cv.totalPower.Cmp(cv2.totalPower) != 0 { + return false + } + // safe to range map, map compare + for k, v := range cv.validators { + if v2, ok := cv2.validators[k]; !ok { + return false + } else if v.Cmp(v2) != 0 { + return false + } + } + return true +} + +func (cv *cacheValidator) add(validators map[string]*big.Int) { + // safe to range map, check and update all KVs with another map + for operator, newPower := range validators { + power, ok := cv.validators[operator] + if !ok { + power = common.Big0 + } + if power.Cmp(newPower) != 0 { + cv.update = true + // only do sub when power>0 + if ok { + cv.totalPower.Sub(cv.totalPower, power) + } + // use < 1 to keep it the same as 'applyValidatorChange' in dogfood + if newPower.Cmp(common.Big1) < 0 { + delete(cv.validators, operator) + continue + } + cv.totalPower.Add(cv.totalPower, newPower) + if !ok { + cv.validators[operator] = new(big.Int) + } + cv.validators[operator].Set(newPower) + } + } +} + +func (cv *cacheValidator) commit(ctx sdk.Context, k Submitter) { + if !cv.update { + return + } + // #nosec blockHeight is not negative + // TODO: consider change the define of all height types in proto to int64(since cosmossdk defined block height as int64) to get avoid all these conversion + k.SetValidatorUpdateForCache(ctx, oracletypes.ValidatorUpdateBlock{Block: uint64(ctx.BlockHeight())}) + cv.update = false +} + +func (cv *cacheValidator) size() int { + return len(cv.validators) +} + +// returned slice is ordered +func (cv *cacheValidator) slice() []string { + if cv.size() == 0 { + return nil + } + validators := make([]string, 0, cv.size()) + // safe to range map, this range is used to generate a sorted slice + for validator := range cv.validators { + validators = append(validators, validator) + } + slices.Sort(validators) + return validators +} + +func (cp *cacheParams) Equals(cp2 *cacheParams) bool { + if cp == nil || cp2 == nil { + return cp == cp2 + } + if cp.update != cp2.update { + return false + } + p1 := cp.params + p2 := cp2.params + return reflect.DeepEqual(p1, p2) +} + +func (cp *cacheParams) add(p *oracletypes.Params) { + cp.params = p + cp.update = true +} + +func (cp *cacheParams) commit(ctx sdk.Context, k Submitter) { + if !cp.update { + return + } + k.SetParamsForCache(ctx, oracletypes.RecentParams{ + // #nosec G115 blockheight is not negative + Block: uint64(ctx.BlockHeight()), + Params: cp.params, + }) + cp.update = false +} + +// memory cache +func (c *caches) AddCache(i any) error { + switch item := i.(type) { + case *oracletypes.MsgItem: + c.msg.add(item) + case *oracletypes.Params: + c.params.add(item) + case ItemV: + c.validators.add(item) + default: + return fmt.Errorf("unsuppported caceh type: %T", i) + } + return nil +} + +// Read reads the cache +func (c *caches) Read(i any) bool { + switch item := i.(type) { + case ItemV: + if item == nil { + return false + } + // safe to range map, map copy + for addr, power := range c.validators.validators { + item[addr] = power + } + return c.validators.update + case *oracletypes.Params: + if item == nil { + return false + } + *item = *c.params.params + return c.params.update + case *[]*oracletypes.MsgItem: + if item == nil { + return false 
+ } + *item = *c.msg + return len(*c.msg) > 0 + default: + return false + } +} + +func (c *caches) GetThreshold() *threshold { + params := &oracletypes.Params{} + c.Read(params) + return &threshold{ + totalPower: c.GetTotalPower(), + thresholdA: big.NewInt(int64(params.ThresholdA)), + thresholdB: big.NewInt(int64(params.ThresholdB)), + } +} + +// GetPowerForValidator returns the power of a validator +func (c *caches) GetPowerForValidator(validator string) (power *big.Int, found bool) { + if c.validators != nil && + len(c.validators.validators) > 0 { + power = c.validators.validators[validator] + if power != nil { + found = true + } + } + // if caches not filled yet, we just return not-found instead of fetching from keeper + return +} + +// GetTotalPower returns the total power of all validators +func (c *caches) GetTotalPower() *big.Int { + return new(big.Int).Set(c.validators.totalPower) +} + +// GetTokenFeederForFeederID returns the token feeder for a feederID +func (c *caches) GetTokenFeederForFeederID(feederID int64) (tokenFeeder *oracletypes.TokenFeeder, found bool) { + if c.params != nil && + c.params.params != nil && + int64(len(c.params.params.TokenFeeders)) > feederID { + tokenFeeder = c.params.params.TokenFeeders[feederID] + found = true + } + return +} + +// SkipCommit skip real commit by setting the update flag to false +func (c *caches) SkipCommit() { + c.validators.update = false + c.params.update = false +} + +// Commit commits the cache to the KVStore +func (c *caches) Commit(ctx sdk.Context, reset bool) (msgUpdated, validatorsUpdated, paramsUpdated bool) { + if len(*(c.msg)) > 0 { + c.msg.commit(ctx, c.k) + msgUpdated = true + } + + if c.validators.update { + c.validators.commit(ctx, c.k) + validatorsUpdated = true + } + + if c.params.update { + c.params.commit(ctx, c.k) + paramsUpdated = true + } + if reset { + c.ResetCaches() + } + return +} + +func (c *caches) ResetCaches() { + *c = *(newCaches()) +} + +func newCaches() *caches { + return &caches{ + msg: new(cacheMsgs), + validators: &cacheValidator{ + validators: make(map[string]*big.Int), + totalPower: big.NewInt(0), + }, + params: &cacheParams{}, + } +} diff --git a/x/oracle/keeper/feedermanagement/data_elaborate_test.go b/x/oracle/keeper/feedermanagement/data_elaborate_test.go new file mode 100644 index 000000000..47b172c43 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/data_elaborate_test.go @@ -0,0 +1,331 @@ +package feedermanagement + +import ( + "math" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" +) + +type price oracletypes.MsgItem + +func (p *price) withFeederID(feederID uint64) *price { + ret := *p + ret.FeederID = feederID + return &ret +} + +func (p *price) withValidator(validator string) *price { + ret := *p + ret.Validator = validator + return &ret +} + +func (p *price) msgItem() *oracletypes.MsgItem { + ret := (oracletypes.MsgItem)(*p) + return &ret +} + +func newPrice(prices []*oracletypes.PriceTimeDetID) *price { + return &price{ + PSources: []*oracletypes.PriceSource{ + { + SourceID: 1, + Prices: prices, + }, + }, + } +} + +type validatorSet []*price +type powers struct { + validators map[string]struct{} + p int +} +type blocks struct { + msgItemsInBlocks [][]*price + idx int + accumulated map[string]*powers + // for test cases we use int, so this condition is set to >= (equivalent to > for real bigInt cases) + threshold int + result *oracletypes.PriceTimeDetID +} + +type Blocks struct { + MsgItemsInBlocks [][]*price + Idx int + Accumulated map[string]*powers + // for test 
cases we use int, so this condition is set to >= (equivalent to > for real bigInt cases) + Threshold int + Result *oracletypes.PriceTimeDetID +} + +func newBlocks(t int) *blocks { + return &blocks{ + msgItemsInBlocks: make([][]*price, 0), + accumulated: make(map[string]*powers), + threshold: t, + } +} + +func NewBlocks(t int) *Blocks { + return &Blocks{ + MsgItemsInBlocks: make([][]*price, 0), + Accumulated: make(map[string]*powers), + Threshold: t, + } +} + +func (b *Blocks) AddPrices(ps []*price) { + b.MsgItemsInBlocks = append(b.MsgItemsInBlocks, ps) +} +func (b *Blocks) Next() (ps []*price, result *oracletypes.PriceTimeDetID) { + if b.Idx >= len(b.MsgItemsInBlocks) { + return nil, nil + } + + ret := b.MsgItemsInBlocks[b.Idx] + b.Idx++ + // skip calculation, just return next msgs and result + if b.Result != nil { + return ret, b.Result + } + + // calculate the expected result(fianlPrice) + for _, pMsgItem := range ret { + // TODO: test only, we assume only first element is valid(sourceID=1) + if pMsgItem == nil { + break + } + if len(pMsgItem.PSources) < 1 || pMsgItem.PSources[0].SourceID != 1 { + panic("we support v1 test only") + } + validator := pMsgItem.Validator + pPriceTimeDetIDs := pMsgItem.PSources[0].Prices + for _, pPriceTimeDetID := range pPriceTimeDetIDs { + acPower := 0 + if item := b.Accumulated[pPriceTimeDetID.DetID]; item != nil { + if _, ok := item.validators[validator]; !ok { + item.validators[validator] = struct{}{} + item.p++ + acPower = item.p + } + // we dont update the tmp variable acPower if validator had been seen + } else { + b.Accumulated[pPriceTimeDetID.DetID] = &powers{ + validators: map[string]struct{}{validator: {}}, + p: 1, + } + acPower = 1 + } + if acPower >= b.Threshold { + if b.Result != nil && pPriceTimeDetID.DetID > b.Result.DetID { + b.Result = pPriceTimeDetID + } + } + } + } + return ret, nil +} + +func (b *blocks) AddPirces(ps []*price) { + b.msgItemsInBlocks = append(b.msgItemsInBlocks, ps) +} + +func (b *blocks) next() (ps []*price, result *oracletypes.PriceTimeDetID) { + if b.idx >= len(b.msgItemsInBlocks) { + return nil, nil + } + + ret := b.msgItemsInBlocks[b.idx] + b.idx++ + // skip calculation, just return next msgs and result + if b.result != nil { + return ret, b.result + } + + // calculate the expected result(fianlPrice) + for _, pMsgItem := range ret { + // TODO: test only, we assume only first element is valid(sourceID=1) + if len(pMsgItem.PSources) < 1 || pMsgItem.PSources[0].SourceID != 1 { + panic("we support v1 test only") + } + validator := pMsgItem.Validator + pPriceTimeDetIDs := pMsgItem.PSources[0].Prices + for _, pPriceTimeDetID := range pPriceTimeDetIDs { + acPower := 0 + if item := b.accumulated[pPriceTimeDetID.DetID]; item != nil { + if _, ok := item.validators[validator]; !ok { + item.validators[validator] = struct{}{} + item.p++ + acPower = item.p + } + // we dont update the tmp variable acPower if validator had been seen + } else { + b.accumulated[pPriceTimeDetID.DetID] = &powers{ + validators: map[string]struct{}{validator: {}}, + p: 1, + } + acPower = 1 + } + if acPower >= b.threshold { + b.result = pPriceTimeDetID + return ret, b.result + } + } + } + return ret, nil +} + +func (b *blocks) reset() { + b.idx = 0 + b.accumulated = make(map[string]*powers) +} + +func (b *Blocks) Reset() { + b.Idx = 0 + b.Accumulated = make(map[string]*powers) + b.Result = nil +} + +func generateAllValidatorSets(ps []*price, validators []string) []validatorSet { + total := len(validators) + ret := make([]validatorSet, 0, 8^total) + + vs 
:= make([]*price, total) + var f func(int, int) + f = func(depth int, total int) { + if depth == 0 { + cpy := make([]*price, total) + // price never changed, it's fine to just copy the pointers + copy(cpy, vs) + ret = append(ret, cpy) + return + } + for _, p := range ps { + if p != nil { + vs[total-depth] = p.withFeederID(1).withValidator(validators[total-depth]) + } + f(depth-1, total) + } + } + f(total, total) + return ret +} + +// this method has some hard coded value for easy listing all cases +// it restrict validatorSet corresponding for 4 validators +// we set this sizeValidators and check for caller to notice +// func generateAllBlocks(validatorSets []validatorSet, quotingWindow int, sizeValidators int) []*blocks { +func generateAllBlocks(validatorSets []validatorSet, quotingWindow int, sizeValidators int) []*Blocks { + // TODO: support arbitrary size + if sizeValidators != 4 { + // this variable is not actually used in the following process for now + panic("only support for 4 validators for test case") + } + // count of possible combinations for one validatorSet + count := int(math.Pow(math.Pow(2, float64(sizeValidators)), float64(quotingWindow))) + ret := make([]*Blocks, 0, len(validatorSets)*count) + for _, vs := range validatorSets { + // TODO: this should be generated from seizeValidtors instead of hard code + // but it might break the defination as 'int', we just set 3 here for simplify temporary + tmpBs := make([][]*price, 0) + var f func(int, [][]int) + + f = func(depth int, idxs [][]int) { + if depth == 0 { + if len(tmpBs) != 3 { + panic("length not equal to 3") + } + // bs := newBlocks(3) + bs := NewBlocks(3) + for _, tmp := range tmpBs { + cpy := make([]*price, len(tmp)) + copy(cpy, tmp) + bs.AddPrices(cpy) + } + ret = append(ret, bs) + return + } + if idxs == nil { + // depth-- + // 1 validators, including nil(zero validator) + for i := -1; i < 4; i++ { + if i == -1 { + tmpBs = append(tmpBs, []*price{nil}) + } else { + tmpBs = append(tmpBs, []*price{vs[i]}) + } + f(depth-1, nil) + + if len(tmpBs) > 0 { + tmpBs = tmpBs[:len(tmpBs)-1] + } + + } + + // 2 validators + + f(depth, [][]int{{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3}}) + + // 3 validators + f(depth, [][]int{{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3}}) + + // 4 ovalidators + f(depth, [][]int{{0, 1, 2, 3}}) + } else { + for _, idx := range idxs { + tmp := make([]*price, 0, len(idx)) + for _, id := range idx { + tmp = append(tmp, vs[id]) + } + tmpBs = append(tmpBs, tmp) + f(depth-1, nil) + if len(tmpBs) > 0 { + tmpBs = tmpBs[:len(tmpBs)-1] + } + + } + } + } + + f(3, nil) + } + return ret +} + +var ( + // only consider about combination + // TODO: add cases as Permutation ? 
+ prices []*price = []*price{ + newPrice([]*oracletypes.PriceTimeDetID{ + {Price: "12100000000", Decimal: 8, DetID: "1"}, + }), + newPrice([]*oracletypes.PriceTimeDetID{ + {Price: "12700000000", Decimal: 8, DetID: "2"}, + }), + newPrice([]*oracletypes.PriceTimeDetID{ + {Price: "12900000000", Decimal: 8, DetID: "3"}, + }), + + newPrice([]*oracletypes.PriceTimeDetID{ + {Price: "12100000000", Decimal: 8, DetID: "1"}, + {Price: "12700000000", Decimal: 8, DetID: "2"}, + }), + newPrice([]*oracletypes.PriceTimeDetID{ + {Price: "12100000000", Decimal: 8, DetID: "1"}, + {Price: "12900000000", Decimal: 8, DetID: "3"}, + }), + newPrice([]*oracletypes.PriceTimeDetID{ + {Price: "12700000000", Decimal: 8, DetID: "2"}, + {Price: "12900000000", Decimal: 8, DetID: "3"}, + }), + + newPrice([]*oracletypes.PriceTimeDetID{ + {Price: "12100000000", Decimal: 8, DetID: "1"}, + {Price: "12700000000", Decimal: 8, DetID: "2"}, + {Price: "12900000000", Decimal: 8, DetID: "3"}, + }), + // 0 price should be considered + nil, + } +) diff --git a/x/oracle/keeper/feedermanagement/data_test.go b/x/oracle/keeper/feedermanagement/data_test.go new file mode 100644 index 000000000..32eed1c65 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/data_test.go @@ -0,0 +1,194 @@ +package feedermanagement + +import oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + +var ( + ps1 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}}, + } + ps1_2 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}, {Price: "998", Decimal: 8, DetID: "1"}}, + } + ps1_3 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}, {Price: "999", Decimal: 8, DetID: "2"}}, + } + ps2 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "2"}}, + } + ps3 = &priceSource{ + deterministic: true, + sourceID: 2, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "3"}}, + } + ps3_2 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "3"}}, + } + ps4 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "2"}, {Price: "999", Decimal: 8, DetID: "2"}}, + } + ps5 = &priceSource{ + deterministic: true, + sourceID: 1, + detIDs: map[string]struct{}{"1": {}, "2": {}}, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}, {Price: "999", Decimal: 8, DetID: "2"}}, + } + ps6 = &priceSource{ + deterministic: true, + sourceID: 1, + detIDs: map[string]struct{}{"1": {}, "2": {}, "3": {}}, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}, {Price: "999", Decimal: 8, DetID: "2"}, {Price: "999", Decimal: 8, DetID: "3"}}, + } + msgItem1 = &MsgItem{ + FeederID: 1, + Validator: "validator1", + Power: big1, + PriceSources: []*priceSource{ps1_2, ps2}, + } + msgItem1_2 = &MsgItem{ + FeederID: 1, + Validator: "validator1", + Power: big1, + PriceSources: []*priceSource{ps1, ps2}, + } + msgItem1_3 = &MsgItem{ + FeederID: 1, + Validator: "validator1", + Power: big1, + PriceSources: []*priceSource{ps1}, + } + msgItem2 = &MsgItem{ + FeederID: 1, + Validator: "validator2", + Power: big1, + PriceSources: []*priceSource{ps1_2, ps2}, + } + msgItem2_2 = &MsgItem{ + FeederID: 1, + Validator: "validator2", + Power: big1, + PriceSources: []*priceSource{ps1, ps2}, + } + msgItem3 = &MsgItem{ + FeederID: 1, + 
Validator: "validator3", + Power: big1, + PriceSources: []*priceSource{ps2}, + } + protoMsgItem1 = newTestProtoMsgItem(1, "validator1", "999", "1") + protoMsgItem2 = newTestProtoMsgItem(1, "validator1", "999", "2") + protoMsgItem3 = newTestProtoMsgItem(1, "validator2", "999", "2") + protoMsgItem4 = newTestProtoMsgItem(1, "validator3", "999", "2") + protoMsgItem4_2 = newTestProtoMsgItem(1, "validator3", "777", "2") + protoMsgItem5 = newTestProtoMsgItem(1, "validator4", "999", "2") + + pr1 = &PriceResult{ + Price: "999", + Decimal: 8, + DetID: "1", + Timestamp: timestamp, + } + pr1_2 = &PriceResult{ + Price: "999", + Decimal: 8, + } + pw1 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "1", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator1": {}}, + } + pw2 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator1": {}}, + } + pw2_2 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big2, + Validators: map[string]struct{}{"validator1": {}, "validator2": {}}, + } + + pw3 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator2": {}}, + } + pw3_2 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big3, + Validators: map[string]struct{}{"validator1": {}, "validator2": {}, "validator3": {}}, + } + + pw4 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator3": {}}, + } + pw5 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator4": {}}, + } +) + +func newTestProtoMsgItem(feederID uint64, validator string, price string, detID string) *oracletypes.MsgItem { + return &oracletypes.MsgItem{ + FeederID: feederID, + PSources: []*oracletypes.PriceSource{{ + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{{ + Price: price, + Decimal: 8, + DetID: detID, + Timestamp: timestamp, + }}, + }}, + Validator: validator, + } +} diff --git a/x/oracle/keeper/feedermanagement/elaborate_test.go b/x/oracle/keeper/feedermanagement/elaborate_test.go new file mode 100644 index 000000000..4827eabab --- /dev/null +++ b/x/oracle/keeper/feedermanagement/elaborate_test.go @@ -0,0 +1,77 @@ +//go:build local + +package feedermanagement + +import ( + "fmt" + "testing" + + gomock "go.uber.org/mock/gomock" +) + +// this test elaborate all combinations for {4validators, 3maxNonce, 3detIDs} +func TestRoundTallyElaborate(t *testing.T) { + ret := generateAllValidatorSets(prices, []string{"v1", "v2", "v3", "v4"}) + tests := generateAllBlocks(ret, 3, 4) + ctrl := gomock.NewController(t) + c := NewMockCacheReader(ctrl) + c.EXPECT(). + GetPowerForValidator(gomock.Any()). + Return(big1, true). + AnyTimes() + c.EXPECT(). + IsDeterministic(gomock.Eq(int64(1))). + Return(true, nil). + AnyTimes() + c.EXPECT(). + GetThreshold(). + Return(th). + AnyTimes() + c.EXPECT(). + IsRuleV1(gomock.Any()). + Return(true). 
+ AnyTimes() + + total := len(tests) + for idx, tt := range tests { + fmt.Printf("total:%d, running:%d\r\n", total, idx) + t.Run(fmt.Sprintf("case_%d", idx), func(t *testing.T) { + r := tData.NewRoundWithFeederID(c, 1) + r.cache = c + r.PrepareForNextBlock(int64(params.TokenFeeders[1].StartBaseBlock)) + p, rslt := tt.Next() + for p != nil { + var pRslt *PriceResult + for _, pItem := range p { + if pItem == nil { + continue + } + if pRsltTmp, _, _ := r.Tally(pItem.msgItem()); pRsltTmp != nil { + pRslt = pRsltTmp + } + } + if rslt != nil && (pRslt == nil || rslt.Price != pRslt.Price) { + tt.Reset() + fmt.Println("fail case:", idx, "Tally.result:", pRslt) + p, rslt := tt.Next() + idx2 := 1 + for p != nil { + fmt.Printf("block_%d ", idx2) + idx2++ + for idx3, pi := range p { + fmt.Printf("msgItem_%d: %v ", idx3, pi) + } + fmt.Println() + fmt.Println(rslt) + p, rslt = tt.Next() + } + t.Fatal("failed") + } + if rslt != nil { + break + } + p, rslt = tt.Next() + } + }) + } +} diff --git a/x/oracle/keeper/feedermanagement/feedermanager.go b/x/oracle/keeper/feedermanagement/feedermanager.go new file mode 100644 index 000000000..d9bcdaef3 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/feedermanager.go @@ -0,0 +1,932 @@ +package feedermanagement + +import ( + "errors" + "fmt" + "math/big" + "sort" + "strconv" + "strings" + + sdkerrors "cosmossdk.io/errors" + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +func NewFeederManager(k common.KeeperOracle) *FeederManager { + return &FeederManager{ + k: k, + sortedFeederIDs: make([]int64, 0), + rounds: make(map[int64]*round), + cs: nil, + } +} + +//nolint:revive +func (f *FeederManager) GetCaches() *caches { + return f.cs +} + +func (f *FeederManager) InitCachesForTest(k Submitter, params *oracletypes.Params, validators map[string]*big.Int) { + f.cs = newCaches() + f.cs.Init(k, params, validators) +} + +func (f *FeederManager) GetParamsFromCache() *oracletypes.Params { + return f.cs.params.params +} + +func (f *FeederManager) GetMaxNonceFromCache() int32 { + return f.cs.GetMaxNonce() +} + +func (f *FeederManager) GetMaxSizePricesFromCache() int32 { + return f.cs.GetMaxSizePrices() +} + +func (f *FeederManager) GetTokenIDForFeederID(feederID int64) (int64, bool) { + return f.cs.GetTokenIDForFeederID(feederID) +} + +func (f *FeederManager) SetKeeper(k common.KeeperOracle) { + f.k = k +} + +func (f *FeederManager) SetNilCaches() { + f.cs = nil +} + +// BeginBlock initializes the caches and slashing records, and setup the rounds +func (f *FeederManager) BeginBlock(ctx sdk.Context) (recovered bool) { + // if the cache is nil and we are not in recovery mode, init the caches + if f.cs == nil { + var err error + recovered, err = f.recovery(ctx) + // it's safe to panic since this will only happen when the node is starting with something wrong in the store + if err != nil { + panic(err) + } + // init feederManager if failed to recovery, this should only happened on block_height==1 + if !recovered { + f.initCaches(ctx) + f.SetParamsUpdated() + f.SetValidatorsUpdated() + } + f.initBehaviorRecords(ctx, ctx.BlockHeight()) + // in recovery mode, snapshot of feederManager is set in the beginblock instead of in the process of replaying endblockInrecovery + f.updateCheckTx() + } + return +} + +func (f *FeederManager) EndBlock(ctx 
sdk.Context) { + // update params and validator set if necessary in caches and commit all updated information + addedValidators := f.updateAndCommitCaches(ctx) + + // update Slashing related records (reportInfo, missCountBitArray), handle case for 1. resetSlashing, 2. new validators added for validatorset change + f.updateBehaviorRecordsForNextBlock(ctx, addedValidators) + + // update rounds including create new rounds based on params change, remove expired rounds + // handleQuoteBehavior for ending quotes of rounds + // commit state of mature rounds + f.updateAndCommitRounds(ctx) + + // set status to open for rounds before their quoting window + feederIDs := f.prepareRounds(ctx) + // remove nonces for closing quoting-window and set nonces for opening quoting-window + f.setupNonces(ctx, feederIDs) + + f.ResetFlags() + + f.updateCheckTx() +} + +func (f *FeederManager) EndBlockInRecovery(ctx sdk.Context, params *oracletypes.Params) { + if params != nil { + f.SetParamsUpdated() + _ = f.cs.AddCache(params) + } + f.updateAndCommitRoundsInRecovery(ctx) + f.prepareRounds(ctx) + f.ResetFlags() +} + +func (f *FeederManager) setupNonces(ctx sdk.Context, feederIDs []int64) { + logger := f.k.Logger(ctx) + height := ctx.BlockHeight() + // the order does not matter, it's safe to update independent state in non-deterministic order + // no need to go through all 'hash' process to range sorted key slice + feederIDsUint64 := make([]uint64, 0, len(f.rounds)) + for _, r := range f.rounds { + // remove nonces for closed quoting windows or when forceSeal is marked + if r.IsQuotingWindowEnd(height) || f.forceSeal { + logger.Debug("clear nonces for closing quoting window or forceSeal", "feederID", r.feederID, "roundID", r.roundID, "basedBlock", r.roundBaseBlock, "height", height, "forceSeal", f.forceSeal) + // items will be removed from slice and keep the order, so it's safe to delete items in different order + // #nosec G115 // feederID is index of slice + feederIDsUint64 = append(feederIDsUint64, uint64(r.feederID)) + } + } + + if len(feederIDsUint64) > 0 { + if f.forceSeal { + f.k.RemoveNonceWithFeederIDsForAll(ctx, feederIDsUint64) + } else { + f.k.RemoveNonceWithFeederIDsForValidators(ctx, feederIDsUint64, f.cs.GetValidators()) + } + } + + if len(feederIDs) == 0 { + return + } + // setup nonces for opening quoting windows + // items need to be insert into slice in order, so feederIDs is sorted + sort.Slice(feederIDs, func(i, j int) bool { return feederIDs[i] < feederIDs[j] }) + validators := f.cs.GetValidators() + feederIDsUint64 = make([]uint64, 0, len(feederIDs)) + for _, feederID := range feederIDs { + r := f.rounds[feederID] + logger.Debug("init nonces for new quoting window", "feederID", feederID, "roundID", r.roundID, "basedBlock", r.roundBaseBlock, "height", height) + // #nosec G115 -- feederID is index of slice + feederIDsUint64 = append(feederIDsUint64, uint64(feederID)) + } + f.k.AddZeroNonceItemWithFeederIDsForValidators(ctx, feederIDsUint64, validators) +} + +func (f *FeederManager) initBehaviorRecords(ctx sdk.Context, height int64) { + if !f.validatorsUpdated { + return + } + validators := f.cs.GetValidators() + for _, validator := range validators { + f.k.InitValidatorReportInfo(ctx, validator, height) + } +} + +func (f *FeederManager) updateBehaviorRecordsForNextBlock(ctx sdk.Context, addedValidators []string) { + height := ctx.BlockHeight() + 1 + if f.resetSlashing { + // reset all validators' reportInfo + f.k.ClearAllValidatorReportInfo(ctx) + f.k.ClearAllValidatorMissedRoundBitArray(ctx) + 
validators := f.cs.GetValidators() + // order does not matter for independent state update + for _, validator := range validators { + f.k.InitValidatorReportInfo(ctx, validator, height) + } + } else if f.validatorsUpdated { + // order does not matter for independent state update + for _, validator := range addedValidators { + // add possible new added validator info for slashing tracking + f.k.InitValidatorReportInfo(ctx, validator, height) + } + } +} + +// praepareRounds prepares the rounds for the next block, and returns the feederIDs of the rounds that are open on next block +func (f *FeederManager) prepareRounds(ctx sdk.Context) []int64 { + logger := f.k.Logger(ctx) + feederIDs := make([]int64, 0) + height := ctx.BlockHeight() + // it's safe to range map directly, this is just used to update memory state + for _, r := range f.rounds { + if open := r.PrepareForNextBlock(ctx.BlockHeight()); open { + feederIDs = append(feederIDs, r.feederID) + // logs might not be displayed in order, it's marked with [mem] to indicate that this is a memory state update + logger.Info("[mem] open quoting window for round", "feederID", r.feederID, "roundID", r.roundID, "basedBlock", r.roundBaseBlock, "height", height) + } + } + return feederIDs +} + +// 1. update and commit Params if updated +// 2. update and commit validatorPowers if updated +// forceSeal: 1. params has some modifications related to quoting. 2.validatorSet changed +// resetSlashing: params has some modifications related to oracle_slashing +// func (f *FeederManager) updateAndCommitCaches(ctx sdk.Context) (forceSeal, resetSlashing bool, prevValidators, addedValidators []string) { +func (f *FeederManager) updateAndCommitCaches(ctx sdk.Context) (activeValidators []string) { + // update params in caches + if f.paramsUpdated { + paramsOld := &oracletypes.Params{} + f.cs.Read(paramsOld) + params := f.k.GetParams(ctx) + if paramsOld.IsForceSealingUpdate(¶ms) { + f.SetForceSeal() + } + if paramsOld.IsSlashingResetUpdate(¶ms) { + f.SetResetSlasing() + } + _ = f.cs.AddCache(¶ms) + } + + // update validators + validatorUpdates := f.k.GetValidatorUpdates(ctx) + if len(validatorUpdates) > 0 { + f.SetValidatorsUpdated() + f.SetForceSeal() + activeValidators = make([]string, 0) + validatorMap := make(map[string]*big.Int) + for _, vu := range validatorUpdates { + pubKey, _ := cryptocodec.FromTmProtoPublicKey(vu.PubKey) + validatorStr := sdk.ConsAddress(pubKey.Address()).String() + validatorMap[validatorStr] = big.NewInt(vu.Power) + if vu.Power > 0 { + activeValidators = append(activeValidators, validatorStr) + } + } + // update validator set information in cache + _ = f.cs.AddCache(ItemV(validatorMap)) + } + + // commit caches: msgs is exists, params if updated, validatorPowers is updated + _, vUpdated, pUpdated := f.cs.Commit(ctx, false) + if vUpdated || pUpdated { + f.k.Logger(ctx).Info("update caches", "validatorUpdated", vUpdated, "paramsUpdated", pUpdated) + } + return activeValidators +} + +func (f *FeederManager) commitRoundsInRecovery() { + // safe to range map directly, this is just used to update memory state, we don't update state in recovery mode + for _, r := range f.rounds { + if r.Committable() { + r.FinalPrice() + r.status = roundStatusClosed + } + // close all quotingWindow to skip current rounds' 'handleQuotingMisBehavior' + if f.forceSeal { + r.closeQuotingWindow() + } + } +} + +func (f *FeederManager) commitRounds(ctx sdk.Context) { + logger := f.k.Logger(ctx) + height := ctx.BlockHeight() + successFeederIDs := make([]string, 0) + // 
it's safe to range map directly since the sate update is independent for each feederID, however we use sortedFeederIDs to keep the order of logs + // this can be replaced by map iteration directly when better performance is needed + for _, feederID := range f.sortedFeederIDs { + r := f.rounds[feederID] + if r.Committable() { + finalPrice, ok := r.FinalPrice() + if !ok { + logger.Info("commit round with price from previous", "feederID", r.feederID, "roundID", r.roundID, "baseBlock", r.roundBaseBlock, "height", height) + // #nosec G115 // tokenID is index of slice + f.k.GrowRoundID(ctx, uint64(r.tokenID)) + } else { + if f.cs.IsRuleV1(r.feederID) { + priceCommit := finalPrice.ProtoPriceTimeRound(r.roundID, ctx.BlockTime().Format(oracletypes.TimeLayout)) + logger.Info("commit round with aggregated price", "feederID", r.feederID, "roundID", r.roundID, "baseBlock", r.roundBaseBlock, "price", priceCommit, "height", height) + + // #nosec G115 // tokenID is index of slice + f.k.AppendPriceTR(ctx, uint64(r.tokenID), *priceCommit, finalPrice.DetID) + // f.k.AppendPriceTR(ctx, uint64(r.tokenID), *priceCommit) + + fstr := strconv.FormatInt(feederID, 10) + successFeederIDs = append(successFeederIDs, fstr) // there's no valid price for any round yet + } else { + logger.Error("We currently only support rules under oracle V1: only allow price from source Chainlink", "feederID", r.feederID) + } + } + // keep aggregator for possible 'handlQuotingMisBehavior' at quotingWindowEnd + r.status = roundStatusClosed + } + // close all quotingWindow to skip current rounds' 'handlQuotingMisBehavior' + if f.forceSeal { + r.closeQuotingWindow() + } + } + if len(successFeederIDs) > 0 { + feederIDsStr := strings.Join(successFeederIDs, "_") + ctx.EventManager().EmitEvent(sdk.NewEvent( + oracletypes.EventTypeCreatePrice, + sdk.NewAttribute(oracletypes.AttributeKeyPriceUpdated, oracletypes.AttributeValuePriceUpdatedSuccess), + sdk.NewAttribute(oracletypes.AttributeKeyFeederIDs, feederIDsStr), + )) + } +} + +func (f *FeederManager) handleQuotingMisBehaviorInRecovery(ctx sdk.Context) { + height := ctx.BlockHeight() + logger := f.k.Logger(ctx) + // it's safe to range map directly, no state in kvStore will be updated in recovery mode, only memory state is updated + for _, r := range f.rounds { + if r.IsQuotingWindowEnd(height) && r.a != nil { + validators := f.cs.GetValidators() + for _, validator := range validators { + _, found := f.k.GetValidatorReportInfo(ctx, validator) + if !found { + logger.Error(fmt.Sprintf("Expected report info for validator %s but not found", validator)) + continue + } + _, malicious := r.PerformanceReview(validator) + if malicious { + r.getFinalDetIDForSourceID(oracletypes.SourceChainlinkID) + r.FinalPrice() + } + } + r.closeQuotingWindow() + } + } +} + +func (f *FeederManager) handleQuotingMisBehavior(ctx sdk.Context) { + height := ctx.BlockHeight() + logger := f.k.Logger(ctx) + + // it's safe to range map directly, each state update is independent for each feederID + // state to be updated: {validatorReportInfo, validatorMissedRoundBitArray, signInfo, assets} of individual validator + // we use sortedFeederIDs to keep the order of logs + // this can be replaced by map iteration directly when better performance is needed + for _, feederID := range f.sortedFeederIDs { + r := f.rounds[feederID] + if r.IsQuotingWindowEnd(height) { + if _, found := r.FinalPrice(); !found { + r.closeQuotingWindow() + continue + } + validators := f.cs.GetValidators() + for _, validator := range validators { + 
reportedInfo, found := f.k.GetValidatorReportInfo(ctx, validator) + if !found { + logger.Error(fmt.Sprintf("Expected report info for validator %s but not found", validator)) + continue + } + miss, malicious := r.PerformanceReview(validator) + if malicious { + detID := r.getFinalDetIDForSourceID(oracletypes.SourceChainlinkID) + finalPrice, _ := r.FinalPrice() + logger.Info( + "confirmed malicious price", + "validator", validator, + "infraction_height", height, + "infraction_time", ctx.BlockTime(), + "feederID", r.feederID, + "detID", detID, + "sourceID", oracletypes.SourceChainlinkID, + "finalPrice", finalPrice, + ) + consAddr, err := sdk.ConsAddressFromBech32(validator) + if err != nil { + f.k.Logger(ctx).Error("when do orale_performance_review, got invalid consAddr string. This should never happen", "validatorStr", validator) + continue + } + + operator := f.k.ValidatorByConsAddr(ctx, consAddr) + if operator != nil && !operator.IsJailed() { + power, _ := f.cs.GetPowerForValidator(validator) + coinsBurned := f.k.SlashWithInfractionReason(ctx, consAddr, height, power.Int64(), f.k.GetSlashFractionMalicious(ctx), stakingtypes.Infraction_INFRACTION_UNSPECIFIED) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + oracletypes.EventTypeOracleSlash, + sdk.NewAttribute(oracletypes.AttributeKeyValidatorKey, validator), + sdk.NewAttribute(oracletypes.AttributeKeyPower, fmt.Sprintf("%d", power)), + sdk.NewAttribute(oracletypes.AttributeKeyReason, oracletypes.AttributeValueMaliciousReportPrice), + sdk.NewAttribute(oracletypes.AttributeKeyJailed, validator), + sdk.NewAttribute(oracletypes.AttributeKeyBurnedCoins, coinsBurned.String()), + ), + ) + f.k.Jail(ctx, consAddr) + jailUntil := ctx.BlockHeader().Time.Add(f.k.GetMaliciousJailDuration(ctx)) + f.k.JailUntil(ctx, consAddr, jailUntil) + + reportedInfo.MissedRoundsCounter = 0 + reportedInfo.IndexOffset = 0 + f.k.ClearValidatorMissedRoundBitArray(ctx, validator) + } + continue + } + reportedRoundsWindow := f.k.GetReportedRoundsWindow(ctx) + // #nosec G115 + index := uint64(reportedInfo.IndexOffset % reportedRoundsWindow) + reportedInfo.IndexOffset++ + // Update reported round bit array & counter + // This counter just tracks the sum of the bit array + // That way we avoid needing to read/write the whole array each time + previous := f.k.GetValidatorMissedRoundBitArray(ctx, validator, index) + switch { + case !previous && miss: + // Array value has changed from not missed to missed, increment counter + f.k.SetValidatorMissedRoundBitArray(ctx, validator, index, true) + reportedInfo.MissedRoundsCounter++ + case previous && !miss: + // Array value has changed from missed to not missed, decrement counter + f.k.SetValidatorMissedRoundBitArray(ctx, validator, index, false) + reportedInfo.MissedRoundsCounter-- + default: + // Array value at this index has not changed, no need to update counter + } + + minReportedPerWindow := f.k.GetMinReportedPerWindow(ctx) + + if miss { + ctx.EventManager().EmitEvent( + sdk.NewEvent( + oracletypes.EventTypeOracleLiveness, + sdk.NewAttribute(oracletypes.AttributeKeyValidatorKey, validator), + sdk.NewAttribute(oracletypes.AttributeKeyMissedRounds, fmt.Sprintf("%d", reportedInfo.MissedRoundsCounter)), + sdk.NewAttribute(oracletypes.AttributeKeyHeight, fmt.Sprintf("%d", height)), + ), + ) + + logger.Info( + "oracle_absent validator", + "height", ctx.BlockHeight(), + "validator", validator, + "missed", reportedInfo.MissedRoundsCounter, + "threshold", minReportedPerWindow, + ) + } + + minHeight := reportedInfo.StartHeight + 
reportedRoundsWindow + maxMissed := reportedRoundsWindow - minReportedPerWindow + // if we are past the minimum height and the validator has missed too many rounds reporting prices, punish them + if height > minHeight && reportedInfo.MissedRoundsCounter > maxMissed { + consAddr, err := sdk.ConsAddressFromBech32(validator) + if err != nil { + f.k.Logger(ctx).Error("when do orale_performance_review, got invalid consAddr string. This should never happen", "validatorStr", validator) + continue + } + operator := f.k.ValidatorByConsAddr(ctx, consAddr) + if operator != nil && !operator.IsJailed() { + // missing rounds confirmed: just jail the validator + f.k.Jail(ctx, consAddr) + jailUntil := ctx.BlockHeader().Time.Add(f.k.GetMissJailDuration(ctx)) + f.k.JailUntil(ctx, consAddr, jailUntil) + + // We need to reset the counter & array so that the validator won't be immediately slashed for miss report info upon rebonding. + reportedInfo.MissedRoundsCounter = 0 + reportedInfo.IndexOffset = 0 + f.k.ClearValidatorMissedRoundBitArray(ctx, validator) + + logger.Info( + "jailing validator due to oracle_liveness fault", + "height", height, + "validator", consAddr.String(), + "min_height", minHeight, + "threshold", minReportedPerWindow, + "jailed_until", jailUntil, + ) + } else { + // validator was (a) not found or (b) already jailed so we do not slash + logger.Info( + "validator would have been slashed for too many missed reporting price, but was either not found in store or already jailed", + "validator", validator, + ) + } + } + // Set the updated reportInfo + f.k.SetValidatorReportInfo(ctx, validator, reportedInfo) + } + r.closeQuotingWindow() + } + } +} + +func (f *FeederManager) setCommittableState(ctx sdk.Context) { + if f.forceSeal { + // safe to range map. update memory state only, the result would be the same in any order + for _, r := range f.rounds { + if r.status == roundStatusOpen { + r.status = roundStatusCommittable + } + } + } else { + height := ctx.BlockHeight() + // safe to range map. update memory state only, the result would be the same in any order + for _, r := range f.rounds { + if r.IsQuotingWindowEnd(height) && r.status == roundStatusOpen { + r.status = roundStatusCommittable + } + } + } +} + +func (f *FeederManager) updateRoundsParamsAndAddNewRounds(ctx sdk.Context) { + height := ctx.BlockHeight() + logger := f.k.Logger(ctx) + + if f.paramsUpdated { + params := &oracletypes.Params{} + f.cs.Read(params) + existsFeederIDs := make(map[int64]struct{}) + // safe to range map. 
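// Editor's illustrative sketch (not part of the patch): the liveness bookkeeping
// in handleQuotingMisBehavior above keeps a rolling bit array of size
// reportedRoundsWindow plus a counter that mirrors its sum, so the whole array
// never has to be rescanned. The in-memory model below (type and method names
// are hypothetical) shows the same update rule.
package main

import "fmt"

type reportWindow struct {
	window      int64
	missed      map[int64]bool // index -> missed bit, stands in for the KV-store bit array
	indexOffset int64
	missCounter int64
}

// record marks whether the validator missed the current round and keeps the
// counter equal to the number of set bits in the window.
func (w *reportWindow) record(miss bool) {
	index := w.indexOffset % w.window
	w.indexOffset++
	previous := w.missed[index]
	switch {
	case !previous && miss:
		w.missed[index] = true
		w.missCounter++
	case previous && !miss:
		w.missed[index] = false
		w.missCounter--
	}
	// otherwise the bit is unchanged and the counter stays as-is
}

func main() {
	w := &reportWindow{window: 5, missed: make(map[int64]bool)}
	for _, miss := range []bool{true, true, false, false, true, false} {
		w.record(miss)
	}
	// the 6th report overwrote slot 0 (index 5%5 == 0), clearing one earlier miss
	fmt.Println(w.missCounter) // 2
	minReportedPerWindow := int64(2)
	fmt.Println(w.missCounter > w.window-minReportedPerWindow) // false: not jailed yet
}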
update memory state only, the result would be the same in any order + for _, r := range f.rounds { + r.UpdateParams(params.TokenFeeders[r.feederID], int64(params.MaxNonce)) + existsFeederIDs[r.feederID] = struct{}{} + } + // add new rounds + for feederID, tokenFeeder := range params.TokenFeeders { + if feederID == 0 { + continue + } + feederID := int64(feederID) + // #nosec G115 + if _, ok := existsFeederIDs[feederID]; !ok && (tokenFeeder.EndBlock == 0 || tokenFeeder.EndBlock > uint64(height)) { + logger.Info("[mem] add new round", "feederID", feederID, "height", height) + f.sortedFeederIDs = append(f.sortedFeederIDs, feederID) + f.rounds[feederID] = newRound(feederID, tokenFeeder, int64(params.MaxNonce), f.cs, NewAggMedian()) + } + } + f.sortedFeederIDs.sort() + } +} + +func (f *FeederManager) removeExpiredRounds(ctx sdk.Context) { + height := ctx.BlockHeight() + expiredFeederIDs := make([]int64, 0) + // safe to range map, we generate the slice, the content of elements would be the same, order does not matter + for _, r := range f.rounds { + if r.endBlock > 0 && r.endBlock <= height { + expiredFeederIDs = append(expiredFeederIDs, r.feederID) + } + } + // the order does not matter when remove item from slice as RemoveNonceWithFeederIDForAll does + expiredFeederIDsToRemoveUint64 := make([]uint64, 0) + for _, feederID := range expiredFeederIDs { + if r := f.rounds[feederID]; r.status != roundStatusClosed { + r.closeQuotingWindow() + // #nosec G115 + expiredFeederIDsToRemoveUint64 = append(expiredFeederIDsToRemoveUint64, uint64(feederID)) + } + delete(f.rounds, feederID) + f.sortedFeederIDs.remove(feederID) + } + if len(expiredFeederIDsToRemoveUint64) > 0 { + f.k.RemoveNonceWithFeederIDsForValidators(ctx, expiredFeederIDsToRemoveUint64, f.cs.GetValidators()) + } +} + +func (f *FeederManager) updateAndCommitRoundsInRecovery(ctx sdk.Context) { + f.setCommittableState(ctx) + f.commitRoundsInRecovery() + f.handleQuotingMisBehaviorInRecovery(ctx) + f.updateRoundsParamsAndAddNewRounds(ctx) + f.removeExpiredRounds(ctx) +} + +func (f *FeederManager) updateAndCommitRounds(ctx sdk.Context) { + f.setCommittableState(ctx) + f.commitRounds(ctx) + // behaviors review and close quotingWindow + f.handleQuotingMisBehavior(ctx) + f.updateRoundsParamsAndAddNewRounds(ctx) + f.removeExpiredRounds(ctx) +} + +func (f *FeederManager) ResetFlags() { + f.paramsUpdated = false + f.validatorsUpdated = false + f.forceSeal = false + f.resetSlashing = false +} + +func (f *FeederManager) SetParamsUpdated() { + f.paramsUpdated = true +} + +func (f *FeederManager) SetValidatorsUpdated() { + f.validatorsUpdated = true +} + +func (f *FeederManager) SetResetSlasing() { + f.resetSlashing = true +} + +func (f *FeederManager) SetForceSeal() { + f.forceSeal = true +} + +func (f *FeederManager) ValidateMsg(msg *oracletypes.MsgCreatePrice) error { + // nonce, feederID, creator has been verified by anteHandler + // baseBlock is going to be verified by its corresponding round + decimal, err := f.cs.GetDecimalFromFeederID(msg.FeederID) + if err != nil { + return err + } + for _, ps := range msg.Prices { + // #nosec G115 + deterministic, err := f.cs.IsDeterministic(int64(ps.SourceID)) + if err != nil { + return err + } + l := len(ps.Prices) + if deterministic { + if l == 0 { + return fmt.Errorf("source:id_%d has no valid price, empty list", ps.SourceID) + } + if l > int(f.cs.GetMaxNonce()) { + return fmt.Errorf("deterministic source:id_%d must provide no more than %d prices from different DetIDs, got:%d", ps.SourceID, f.cs.GetMaxNonce(), 
l) + } + seenDetIDs := make(map[string]struct{}) + for _, p := range ps.Prices { + if _, ok := seenDetIDs[p.DetID]; ok { + return errors.New("duplicated detIDs") + } + if len(p.Price) == 0 { + return errors.New("price must not be empty") + } + if len(p.DetID) == 0 { + return errors.New("detID of deteministic price must not be empty") + } + if p.Decimal != decimal { + return fmt.Errorf("decimal not match for feederID:%d, expect:%d, got:%d", msg.FeederID, decimal, p.Decimal) + } + seenDetIDs[p.DetID] = struct{}{} + } + } else { + // NOTE: v1 does not actually have this type of sources + if l != 1 { + return fmt.Errorf("non-deteministic sources should provide exactly one valid price, got:%d", len(ps.Prices)) + } + p := ps.Prices[0] + if len(p.Price) == 0 { + return errors.New("price must not be empty") + } + if p.Decimal != decimal { + return fmt.Errorf("decimal not match for feederID:%d, expect:%d, got:%d", msg.FeederID, decimal, p.Decimal) + } + if len(p.DetID) > 0 { + return errors.New("price from non-deterministic should not have detID") + } + if len(p.Timestamp) == 0 { + return errors.New("price from non-deterministic must have timestamp") + } + } + } + return nil +} + +func (f *FeederManager) ProcessQuote(ctx sdk.Context, msg *oracletypes.MsgCreatePrice, isCheckTx bool) (*oracletypes.PriceTimeRound, error) { + if isCheckTx { + f = f.getCheckTx() + } + if err := f.ValidateMsg(msg); err != nil { + return nil, oracletypes.ErrInvalidMsg.Wrap(err.Error()) + } + msgItem := getProtoMsgItemFromQuote(msg) + + // #nosec G115 // feederID is index of slice + r, ok := f.rounds[int64(msgItem.FeederID)] + if !ok { + // This should not happened since we do check the nonce in anthHandle + return nil, fmt.Errorf("round not exists for feederID:%d, proposer:%s", msgItem.FeederID, msgItem.Validator) + } + + // #nosec G115 // baseBlock is block height which is not negative + if valid := r.ValidQuotingBaseBlock(int64(msg.BasedBlock)); !valid { + return nil, fmt.Errorf("failed to process price-feed msg for feederID:%d, round is quoting:%t,quotingWindow is open:%t, expected baseBlock:%d, got baseBlock:%d", msgItem.FeederID, r.IsQuoting(), r.IsQuotingWindowOpen(), r.roundBaseBlock, msg.BasedBlock) + } + + // tally msgItem + finalPrice, validMsgItem, err := r.Tally(msgItem) + + // record msgItem in caches if needed + defer func() { + if !isCheckTx && + validMsgItem != nil && + (err == nil || sdkerrors.IsOf(err, oracletypes.ErrQuoteRecorded)) { + _ = f.cs.AddCache(validMsgItem) + } + }() + + if err != nil { + return nil, err + } + + if finalPrice == nil { + return nil, nil + } + return finalPrice.ProtoPriceTimeRound(r.roundID, ctx.BlockTime().Format(oracletypes.TimeLayout)), nil +} + +func (f *FeederManager) getCheckTx() *FeederManager { + fCheckTx := f.fCheckTx + ret := *fCheckTx + ret.fCheckTx = nil + + // rounds + rounds := make(map[int64]*round) + // safe to range map, map copy + for id, r := range fCheckTx.rounds { + rounds[id] = r.CopyForCheckTx() + } + ret.rounds = rounds + + return &ret +} + +func (f *FeederManager) updateCheckTx() { + // flgas are taken care of + // sortedFeederIDs will not be modified except in abci.EndBlock + // successFeedereIDs will not be modifed except in abci.EndBlock + // caches will not be modifed except in abci.EndBlock, abci.DeliverTx (in abci.Query_simulate, or abci.CheckTx the update in ProcessQuote is forbided) + // shallow copy is good enough for these fields + + ret := *f + ret.fCheckTx = nil + + rounds := make(map[int64]*round) + + // safe to range map, map copy + for id, 
r := range f.rounds { + rounds[id] = r.CopyForCheckTx() + } + ret.rounds = rounds + f.fCheckTx = &ret +} + +func (f *FeederManager) ProcessQuoteInRecovery(msgItems []*oracletypes.MsgItem) { + for _, msgItem := range msgItems { + // #nosec G115 // feederID is index of slice + r, ok := f.rounds[int64(msgItem.FeederID)] + if !ok { + continue + } + // error deos not need to be handled in recovery mode + //nolint:all + r.Tally(msgItem) + } +} + +// initCaches initializes the caches of the FeederManager with keeper, params, validatorPowers +func (f *FeederManager) initCaches(ctx sdk.Context) { + f.cs = newCaches() + params := f.k.GetParams(ctx) + validatorSet := f.k.GetAllExocoreValidators(ctx) + validatorPowers := make(map[string]*big.Int) + for _, v := range validatorSet { + validatorPowers[sdk.ConsAddress(v.Address).String()] = big.NewInt(v.Power) + } + f.cs.Init(f.k, ¶ms, validatorPowers) +} + +func (f *FeederManager) recovery(ctx sdk.Context) (bool, error) { + height := ctx.BlockHeight() + recentParamsList, prevRecentParams, latestRecentParams := f.k.GetRecentParamsWithinMaxNonce(ctx) + if latestRecentParams.Block == 0 { + return false, nil + } + validatorUpdateBlock, found := f.k.GetValidatorUpdateBlock(ctx) + if !found { + // on recovery mode, the validator update block must be found + return false, errors.New("validator update block not found in recovery mode for feeder manager") + } + // #nosec G115 // validatorUpdateBlock.Block represents blockheight + startHeight, replayRecentParamsList := getRecoveryStartPoint(height, recentParamsList, &prevRecentParams, &latestRecentParams, int64(validatorUpdateBlock.Block)) + + f.cs = newCaches() + params := replayRecentParamsList[0].Params + replayRecentParamsList = replayRecentParamsList[1:] + + validatorSet := f.k.GetAllExocoreValidators(ctx) + validatorPowers := make(map[string]*big.Int) + for _, v := range validatorSet { + validatorPowers[sdk.ConsAddress(v.Address).String()] = big.NewInt(v.Power) + } + + f.cs.Init(f.k, params, validatorPowers) + + replayHeight := startHeight - 1 + + ctxReplay := ctx.WithBlockHeight(replayHeight) + for tfID, tf := range params.TokenFeeders { + if tfID == 0 { + continue + } + // #nosec G115 // safe conversion + if tf.EndBlock > 0 && int64(tf.EndBlock) <= replayHeight { + continue + } + tfID := int64(tfID) + f.rounds[tfID] = newRound(tfID, tf, int64(params.MaxNonce), f.cs, NewAggMedian()) + f.sortedFeederIDs.add(tfID) + } + f.prepareRounds(ctxReplay) + + params = nil + recentMsgs := f.k.GetAllRecentMsg(ctxReplay) + for ; startHeight < height; startHeight++ { + ctxReplay = ctxReplay.WithBlockHeight(startHeight) + // only execute msgItems corresponding to rounds opened on or after replayHeight, since any rounds opened before replay height must be closed on or before height-1 + // which means no memory state need to be updated for thoes rounds + // and we don't need to take care of 'close quoting-window' since the size of replay window t most equals to maxNonce + // #nosec G115 // block is not negative + if len(recentMsgs) > 0 && int64(recentMsgs[0].Block) <= startHeight { + i := 0 + for idx, recentMsg := range recentMsgs { + // #nosec G115 // block height is defined as int64 in cosmossdk + if int64(recentMsg.Block) > startHeight { + break + } + i = idx + if int64(recentMsg.Block) == startHeight { + f.ProcessQuoteInRecovery(recentMsg.Msgs) + break + } + } + recentMsgs = recentMsgs[i+1:] + } + // #nosec G115 + if len(replayRecentParamsList) > 0 && int64(replayRecentParamsList[0].Block) == startHeight { + params = 
replayRecentParamsList[0].Params + replayRecentParamsList = replayRecentParamsList[1:] + } + f.EndBlockInRecovery(ctxReplay, params) + } + + f.cs.SkipCommit() + + return true, nil +} + +func (f *FeederManager) Equals(fm *FeederManager) bool { + if f == nil || fm == nil { + return f == fm + } + if f.fCheckTx == nil && fm.fCheckTx != nil { + return false + } + if f.fCheckTx != nil && fm.fCheckTx == nil { + return false + } + if !f.fCheckTx.Equals(fm.fCheckTx) { + return false + } + if f.paramsUpdated != fm.paramsUpdated || + f.validatorsUpdated != fm.validatorsUpdated || + f.resetSlashing != fm.resetSlashing || + f.forceSeal != fm.forceSeal { + return false + } + if !f.sortedFeederIDs.Equals(fm.sortedFeederIDs) { + return false + } + if !f.cs.Equals(fm.cs) { + return false + } + if len(f.rounds) != len(fm.rounds) { + return false + } + // safe to range map, compare map + for id, r := range f.rounds { + if r2, ok := fm.rounds[id]; !ok { + return false + } else if !r.Equals(r2) { + return false + } + } + return true +} + +// recoveryStartPoint returns the height to start the recovery process +func getRecoveryStartPoint(currentHeight int64, recentParamsList []*oracletypes.RecentParams, prevRecentParams, latestRecentParams *oracletypes.RecentParams, validatorUpdateHeight int64) (height int64, replayRecentParamsList []*oracletypes.RecentParams) { + if currentHeight > int64(latestRecentParams.Params.MaxNonce) { + height = currentHeight - int64(latestRecentParams.Params.MaxNonce) + } + // there is no params updated in the recentParamsList, we can start from the validator update block if it's not too old(out of the distance of maxNonce from current height) + if len(recentParamsList) == 0 { + if height < validatorUpdateHeight { + height = validatorUpdateHeight + } + // for empty recetParamsList, use latestrecentParams as the start point + replayRecentParamsList = append(replayRecentParamsList, latestRecentParams) + height++ + return height, replayRecentParamsList + } + + if prevRecentParams.Block > 0 && prevRecentParams.Params.IsForceSealingUpdate(recentParamsList[0].Params) { + // #nosec G115 + height = int64(recentParamsList[0].Block) + } + idx := 0 + for i := 1; i < len(recentParamsList); i++ { + if recentParamsList[i-1].Params.IsForceSealingUpdate(recentParamsList[i].Params) { + // #nosec G115 + height = int64(recentParamsList[i].Block) + idx = i + } + } + replayRecentParamsList = recentParamsList[idx:] + + if height < validatorUpdateHeight { + height = validatorUpdateHeight + } + height++ + return height, replayRecentParamsList +} + +func getProtoMsgItemFromQuote(msg *oracletypes.MsgCreatePrice) *oracletypes.MsgItem { + // address has been valid before + validator, _ := oracletypes.ConsAddrStrFromCreator(msg.Creator) + + return &oracletypes.MsgItem{ + FeederID: msg.FeederID, + // validator's consAddr + Validator: validator, + PSources: msg.Prices, + } +} diff --git a/x/oracle/keeper/feedermanagement/feedermanager_test.go b/x/oracle/keeper/feedermanagement/feedermanager_test.go new file mode 100644 index 000000000..341b316e8 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/feedermanager_test.go @@ -0,0 +1,101 @@ +package feedermanagement + +import ( + "math/big" + "testing" + + gomock "go.uber.org/mock/gomock" + + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/testdata" + . 
"github.com/smartystreets/goconvey/convey" +) + +//go:generate mockgen -destination mock_cachereader_test.go -package feedermanagement github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement CacheReader + +func TestFeederManagement(t *testing.T) { + Convey("compare FeederManager", t, func() { + fm := NewFeederManager(nil) + ctrl := gomock.NewController(t) + c := NewMockCacheReader(ctrl) + c.EXPECT(). + GetThreshold(). + Return(&threshold{big.NewInt(4), big.NewInt(1), big.NewInt(3)}). + AnyTimes() + Convey("add a new round", func() { + ps1 := priceSource{deterministic: true, prices: []*PriceInfo{{Price: "123"}}} + ps2 := ps1 + fm2 := *fm + + fm.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian) + fm.rounds[1].PrepareForNextBlock(20) + fm.sortedFeederIDs.add(1) + fm.rounds[1].a.ds.AddPriceSource(&ps1, big.NewInt(1), "v1") + + fm2.rounds = make(map[int64]*round) + fm2.sortedFeederIDs = make([]int64, 0) + fm2.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian) + fm2.rounds[1].PrepareForNextBlock(20) + fm2.sortedFeederIDs.add(1) + fm2.rounds[1].a.ds.AddPriceSource(&ps2, big.NewInt(1), "v1") + + So(fm.Equals(&fm2), ShouldBeTrue) + }) + }) + Convey("check copy results", t, func() { + ctrl := gomock.NewController(t) + c := NewMockCacheReader(ctrl) + c.EXPECT(). + GetThreshold(). + Return(&threshold{big.NewInt(4), big.NewInt(1), big.NewInt(3)}). + AnyTimes() + + // feedermanager + Convey("copy of feedermanager", func() { + f := tData.NewFeederManager(c) + f.updateCheckTx() + fc := f.fCheckTx + f.fCheckTx = nil + So(f.Equals(fc), ShouldBeTrue) + }) + Convey("copy of round", func() { + r := tData.NewRound(c) + rc := r.CopyForCheckTx() + So(r.Equals(rc), ShouldBeTrue) + }) + Convey("copy of aggregagtor", func() { + a := tData.NewAggregator(true) + ac := a.CopyForCheckTx() + So(a.Equals(ac), ShouldBeTrue) + }) + Convey("copy of recordsValidators", func() { + v := tData.NewRecordsValidators(true) + vc := v.Cpy() + So(v.Equals(vc), ShouldBeTrue) + }) + Convey("copy of recordsDSs", func() { + dss := tData.NewRecordsDSs(true) + dssc := dss.Cpy() + So(dss.Equals(dssc), ShouldBeTrue) + }) + Convey("copy of recordsDS", func() { + ds := tData.NewRecordsDS(true) + dsc := ds.Cpy() + So(ds.Equals(dsc), ShouldBeTrue) + }) + Convey("copy of priceValidator", func() { + pv := tData.NewPriceValidator(true) + pvc := pv.Cpy() + So(pv.Equals(pvc), ShouldBeTrue) + }) + Convey("copy of priceSource", func() { + ps := tData.NewPriceSource(true, true) + psc := ps.Cpy() + So(ps.Equals(psc), ShouldBeTrue) + }) + Convey("copy of pricePower", func() { + pw := tData.NewPricePower() + pwc := pw.Cpy() + So(pw.Equals(pwc), ShouldBeTrue) + }) + }) +} diff --git a/x/oracle/keeper/feedermanagement/helper_test.go b/x/oracle/keeper/feedermanagement/helper_test.go new file mode 100644 index 000000000..c9c97eb2f --- /dev/null +++ b/x/oracle/keeper/feedermanagement/helper_test.go @@ -0,0 +1,117 @@ +package feedermanagement + +import ( + "math/big" + "math/rand" + + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/testdata" + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" +) + +type Test struct { +} + +var ( + tData *Test + params = testdata.DefaultParamsForTest() + r = rand.New(rand.NewSource(1)) + timestamp = "2025-01-01 00:01:02" + decimal = int32(8) + big1 = big.NewInt(1) + big2 = big.NewInt(2) + big3 = big.NewInt(3) + big4 = big.NewInt(4) + th = &threshold{big4, big2, big3} +) + +func (t *Test) NewFeederManager(cs 
CacheReader) *FeederManager { + f := NewFeederManager(nil) + round := t.NewRound(cs) + f.rounds[round.feederID] = round + // prepare this Round + round.PrepareForNextBlock(int64(params.TokenFeeders[int(round.feederID)].StartBaseBlock)) + return f +} + +func (t *Test) NewPricePower() *PricePower { + return &PricePower{ + Price: t.NewPriceInfo("999", "1"), + Power: big1, + Validators: map[string]struct{}{"validator1": {}}, + } +} + +func (t *Test) NewPriceSource(deterministic bool, filled bool) *priceSource { + ret := newPriceSource(oracletypes.SourceChainlinkID, deterministic) + if filled { + price := t.NewPriceInfo("999", "1") + ret.prices = append(ret.prices, price) + } + return ret +} + +func (t *Test) NewPriceValidator(filled bool) *priceValidator { + ret := newPriceValidator("validator1", big1) + if filled { + ps := t.NewPriceSource(true, true) + ret.priceSources[oracletypes.SourceChainlinkID] = ps + } + return ret +} + +func (t *Test) NewRecordsDS(filled bool) *recordsDS { + ret := newRecordsDS() + if filled { + ret.validators["validator1"] = struct{}{} + ret.accumulatedPowers = big1 + ret.records = append(ret.records, t.NewPricePower()) + } + return ret +} + +func (t *Test) NewRecordsDSs(filled bool) *recordsDSs { + ret := newRecordsDSs(th) + if filled { + rds := t.NewRecordsDS(filled) + ret.dsMap[oracletypes.SourceChainlinkID] = rds + } + return ret +} + +func (t *Test) NewRecordsValidators(filled bool) *recordsValidators { + ret := newRecordsValidators() + if filled { + ret.accumulatedPower = big1 + ret.records["validator1"] = t.NewPriceValidator(filled) + } + return ret +} + +func (t *Test) NewAggregator(filled bool) *aggregator { + ret := newAggregator(th, defaultAggMedian) + if filled { + ret.v = t.NewRecordsValidators(filled) + ret.ds = t.NewRecordsDSs(filled) + } + return ret +} + +func (t *Test) NewRound(cs CacheReader) *round { + feederID := r.Intn(len(params.TokenFeeders)-1) + 1 + round := newRound(int64(feederID), params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian) + return round +} + +func (t *Test) NewRoundWithFeederID(cs CacheReader, feederID int64) *round { + round := newRound(feederID, params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian) + return round +} + +func (t *Test) NewPriceInfo(price string, detID string) *PriceInfo { + return &PriceInfo{ + Price: price, + Decimal: decimal, + DetID: detID, + Timestamp: timestamp, + } +} diff --git a/x/oracle/keeper/feedermanagement/mock_cachereader_test.go b/x/oracle/keeper/feedermanagement/mock_cachereader_test.go new file mode 100644 index 000000000..0aeebca0b --- /dev/null +++ b/x/oracle/keeper/feedermanagement/mock_cachereader_test.go @@ -0,0 +1,127 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement (interfaces: CacheReader) +// +// Generated by this command: +// +// mockgen -destination mock_cachereader_test.go -package feedermanagement github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement CacheReader +// + +// Package feedermanagement is a generated GoMock package. +package feedermanagement + +import ( + big "math/big" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" +) + +// MockCacheReader is a mock of CacheReader interface. +type MockCacheReader struct { + ctrl *gomock.Controller + recorder *MockCacheReaderMockRecorder + isgomock struct{} +} + +// MockCacheReaderMockRecorder is the mock recorder for MockCacheReader. 
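The tests above exercise the feeder manager against the generated MockCacheReader rather than real keeper state. As a minimal sketch of that wiring (assuming it lives in the same feedermanagement package as the generated mock; the helper name newTwoThirdsMock is illustrative and not part of this change), GetThreshold is stubbed to report the 2/3 voting threshold over a total power of 4 used by the fixtures in helper_test.go:

package feedermanagement

import (
	"math/big"
	"testing"

	gomock "go.uber.org/mock/gomock"
)

// newTwoThirdsMock wires the generated MockCacheReader so that GetThreshold always
// reports a 2/3 threshold over a total power of 4, mirroring the fixtures above.
// Illustrative helper only; it is not part of the diff.
func newTwoThirdsMock(t *testing.T) *MockCacheReader {
	ctrl := gomock.NewController(t)
	c := NewMockCacheReader(ctrl)
	c.EXPECT().
		GetThreshold().
		Return(&threshold{totalPower: big.NewInt(4), thresholdA: big.NewInt(2), thresholdB: big.NewInt(3)}).
		AnyTimes()
	return c
}

A test that needs a round can then hand this reader to newRound, as helper_test.go does.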
+type MockCacheReaderMockRecorder struct { + mock *MockCacheReader +} + +// NewMockCacheReader creates a new mock instance. +func NewMockCacheReader(ctrl *gomock.Controller) *MockCacheReader { + mock := &MockCacheReader{ctrl: ctrl} + mock.recorder = &MockCacheReaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCacheReader) EXPECT() *MockCacheReaderMockRecorder { + return m.recorder +} + +// GetPowerForValidator mocks base method. +func (m *MockCacheReader) GetPowerForValidator(validator string) (*big.Int, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPowerForValidator", validator) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetPowerForValidator indicates an expected call of GetPowerForValidator. +func (mr *MockCacheReaderMockRecorder) GetPowerForValidator(validator any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerForValidator", reflect.TypeOf((*MockCacheReader)(nil).GetPowerForValidator), validator) +} + +// GetThreshold mocks base method. +func (m *MockCacheReader) GetThreshold() *threshold { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetThreshold") + ret0, _ := ret[0].(*threshold) + return ret0 +} + +// GetThreshold indicates an expected call of GetThreshold. +func (mr *MockCacheReaderMockRecorder) GetThreshold() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetThreshold", reflect.TypeOf((*MockCacheReader)(nil).GetThreshold)) +} + +// GetTotalPower mocks base method. +func (m *MockCacheReader) GetTotalPower() *big.Int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTotalPower") + ret0, _ := ret[0].(*big.Int) + return ret0 +} + +// GetTotalPower indicates an expected call of GetTotalPower. +func (mr *MockCacheReaderMockRecorder) GetTotalPower() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTotalPower", reflect.TypeOf((*MockCacheReader)(nil).GetTotalPower)) +} + +// GetValidators mocks base method. +func (m *MockCacheReader) GetValidators() []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValidators") + ret0, _ := ret[0].([]string) + return ret0 +} + +// GetValidators indicates an expected call of GetValidators. +func (mr *MockCacheReaderMockRecorder) GetValidators() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidators", reflect.TypeOf((*MockCacheReader)(nil).GetValidators)) +} + +// IsDeterministic mocks base method. +func (m *MockCacheReader) IsDeterministic(sournceID int64) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsDeterministic", sournceID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsDeterministic indicates an expected call of IsDeterministic. +func (mr *MockCacheReaderMockRecorder) IsDeterministic(sournceID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDeterministic", reflect.TypeOf((*MockCacheReader)(nil).IsDeterministic), sournceID) +} + +// IsRuleV1 mocks base method. +func (m *MockCacheReader) IsRuleV1(feederID int64) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsRuleV1", feederID) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsRuleV1 indicates an expected call of IsRuleV1. 
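Stepping back to the CheckTx handling earlier in feedermanager.go: getCheckTx and updateCheckTx exist so that CheckTx and simulate traffic work on a disposable copy of the manager and can never mutate consensus state. A minimal sketch of that copy-on-CheckTx idea, using stand-in types (simRound and simManager are illustrative names, not the module's own): mutable per-round state is deep-copied while read-only data is shared.

package feedermanagement

// simRound and simManager are illustrative stand-ins used only for this sketch.
type simRound struct{ baseBlock int64 }

type simManager struct {
	rounds map[int64]*simRound // mutable per-round state: must be deep-copied
	cache  []string            // read-only after init: safe to share across copies
}

// forCheckTx mirrors the shape of getCheckTx/updateCheckTx above: shallow-copy the
// manager, then rebuild the rounds map with per-round copies so that a CheckTx
// simulation can tally quotes without touching the consensus-state rounds.
func (m *simManager) forCheckTx() *simManager {
	cp := *m
	cp.rounds = make(map[int64]*simRound, len(m.rounds))
	for id, r := range m.rounds {
		rCopy := *r
		cp.rounds[id] = &rCopy
	}
	return &cp
}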
+func (mr *MockCacheReaderMockRecorder) IsRuleV1(feederID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsRuleV1", reflect.TypeOf((*MockCacheReader)(nil).IsRuleV1), feederID) +} diff --git a/x/oracle/keeper/feedermanagement/prices.go b/x/oracle/keeper/feedermanagement/prices.go new file mode 100644 index 000000000..d699b066b --- /dev/null +++ b/x/oracle/keeper/feedermanagement/prices.go @@ -0,0 +1,384 @@ +package feedermanagement + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "slices" + "sort" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" +) + +func GetPriceInfoFromProtoPriceTimeDetID(p *oracletypes.PriceTimeDetID) *PriceInfo { + if p == nil { + return nil + } + return &PriceInfo{ + Price: p.Price, + Decimal: p.Decimal, + Timestamp: p.Timestamp, + DetID: p.DetID, + } +} + +func (p *PriceInfo) ProtoPriceTimeDetID() *oracletypes.PriceTimeDetID { + if p == nil { + return nil + } + return &oracletypes.PriceTimeDetID{ + Price: p.Price, + Decimal: p.Decimal, + Timestamp: p.Timestamp, + DetID: p.DetID, + } +} + +func (p *PriceInfo) EqualDS(p2 *PriceInfo) bool { + if p == nil || p2 == nil { + return p == p2 + } + return p.Price == p2.Price && p.DetID == p2.DetID && p.Decimal == p2.Decimal +} + +func (p *PriceInfo) PriceResult() *PriceResult { + if p == nil { + return nil + } + return &PriceResult{ + Price: p.Price, + Decimal: p.Decimal, + DetID: p.DetID, + Timestamp: p.Timestamp, + } +} + +func (p *PriceResult) PriceInfo() *PriceInfo { + if p == nil { + return nil + } + return &PriceInfo{ + Price: p.Price, + Decimal: p.Decimal, + DetID: p.DetID, + Timestamp: p.Timestamp, + } +} + +func (p *PriceResult) ProtoPriceTimeRound(roundID int64, timestamp string) *oracletypes.PriceTimeRound { + return &oracletypes.PriceTimeRound{ + Price: p.Price, + Decimal: p.Decimal, + Timestamp: timestamp, + // #nosec G115 + RoundID: uint64(roundID), + } +} + +func getPriceSourceFromProto(ps *oracletypes.PriceSource, checker sourceChecker) (*priceSource, error) { + prices := make([]*PriceInfo, 0, len(ps.Prices)) + // #nosec G115 + deterministic, err := checker.IsDeterministic(int64(ps.SourceID)) + if err != nil { + return nil, err + } + for _, p := range ps.Prices { + prices = append(prices, GetPriceInfoFromProtoPriceTimeDetID(p)) + } + return &priceSource{ + // #nosec G115 + deterministic: deterministic, + // #nosec G115 + sourceID: int64(ps.SourceID), + prices: prices, + }, nil +} + +func newPriceValidator(validator string, power *big.Int) *priceValidator { + return &priceValidator{ + finalPrice: nil, + validator: validator, + power: new(big.Int).Set(power), + priceSources: make(map[int64]*priceSource), + } +} + +func (pv *priceValidator) Cpy() *priceValidator { + if pv == nil { + return nil + } + var finalPrice *PriceResult + if pv.finalPrice != nil { + tmp := *pv.finalPrice + finalPrice = &tmp + } + priceSources := make(map[int64]*priceSource) + // safe to range map, map copy + for id, ps := range pv.priceSources { + priceSources[id] = ps.Cpy() + } + return &priceValidator{ + finalPrice: finalPrice, + validator: pv.validator, + power: new(big.Int).Set(pv.power), + priceSources: priceSources, + } +} + +func (pv *priceValidator) Equals(pv2 *priceValidator) bool { + if pv == nil || pv2 == nil { + return pv == pv2 + } + if pv.validator != pv2.validator || pv.power.Cmp(pv2.power) != 0 { + return false + } + if len(pv.priceSources) != len(pv2.priceSources) { + return false + } + // safe to range map, map compare + for id, ps := range 
pv.priceSources { + ps2, ok := pv2.priceSources[id] + if !ok || !ps.Equals(ps2) { + return false + } + } + return true +} + +func (pv *priceValidator) GetPSCopy(sourceID int64, deterministic bool) *priceSource { + if ps, ok := pv.priceSources[sourceID]; ok { + return ps.Cpy() + } + return newPriceSource(sourceID, deterministic) +} + +func (pv *priceValidator) TryAddPriceSources(pSs []*priceSource) (updated map[int64]*priceSource, added []*priceSource, err error) { + var es errorStr + updated = make(map[int64]*priceSource) + for _, psNew := range pSs { + ps, ok := updated[psNew.sourceID] + if !ok { + ps, ok = pv.priceSources[psNew.sourceID] + if !ok { + ps = newPriceSource(psNew.sourceID, psNew.deterministic) + } else { + ps = ps.Cpy() + } + } + psAdded, err := ps.Add(psNew) + if err != nil { + es.add(fmt.Sprintf("sourceID:%d, error:%s", psNew.sourceID, err.Error())) + } else { + updated[psNew.sourceID] = ps + added = append(added, psAdded) + } + } + if len(updated) > 0 { + return updated, added, nil + } + return nil, nil, fmt.Errorf("failed to add priceSource listi, error:%s", es) +} + +func (pv *priceValidator) ApplyAddedPriceSources(psMap map[int64]*priceSource) { + // safe to range map, set all k-v to antoher map + for id, ps := range psMap { + pv.priceSources[id] = ps + } +} + +// TODO: V2: check valdiator has provided all sources required by rules(defined in oracle.params) +func (pv *priceValidator) GetFinalPrice(algo AggAlgorithm) (*PriceResult, bool) { + if pv.finalPrice != nil { + return pv.finalPrice, true + } + if len(pv.priceSources) == 0 { + return nil, false + } + keySlice := make([]int64, 0, len(pv.priceSources)) + // safe to range map, the map is iteration to genrate sorted key slice + for sourceID := range pv.priceSources { + keySlice = append(keySlice, sourceID) + } + slices.Sort(keySlice) + algo.Reset() + for _, sourceID := range keySlice { + price := pv.priceSources[sourceID] + if price.finalPrice == nil { + algo.Reset() + return nil, false + } + if !algo.Add(price.finalPrice) { + algo.Reset() + return nil, false + } + } + if ret := algo.GetResult(); ret != nil { + pv.finalPrice = ret + return ret, true + } + return nil, false +} + +func (pv *priceValidator) UpdateFinalPriceForDS(sourceID int64, finalPrice *PriceResult) bool { + if finalPrice == nil { + return false + } + if price, ok := pv.priceSources[sourceID]; ok { + price.finalPrice = finalPrice + return true + } + return false +} + +func newPriceSource(sourceID int64, deterministic bool) *priceSource { + return &priceSource{ + deterministic: deterministic, + finalPrice: nil, + sourceID: sourceID, + detIDs: make(map[string]struct{}), + prices: make([]*PriceInfo, 0), + } +} + +func (ps *priceSource) Equals(ps2 *priceSource) bool { + if ps == nil || ps2 == nil { + return ps == ps2 + } + if ps.sourceID != ps2.sourceID || ps.deterministic != ps2.deterministic { + return false + } + if !reflect.DeepEqual(ps.detIDs, ps2.detIDs) { + return false + } + if !reflect.DeepEqual(ps.finalPrice, ps2.finalPrice) { + return false + } + if len(ps.prices) != len(ps2.prices) { + return false + } + if !reflect.DeepEqual(ps.prices, ps2.prices) { + return false + } + return true +} + +func (ps *priceSource) Cpy() *priceSource { + if ps == nil { + return nil + } + var finalPrice *PriceResult + if ps.finalPrice != nil { + tmp := *ps.finalPrice + finalPrice = &tmp + } + // deterministic, sourceID + detIDs := make(map[string]struct{}) + // safe to range map, map copy + for detID := range ps.detIDs { + detIDs[detID] = struct{}{} + } + prices 
:= make([]*PriceInfo, 0, len(ps.prices)) + for _, p := range ps.prices { + pCpy := *p + prices = append(prices, &pCpy) + } + return &priceSource{ + deterministic: ps.deterministic, + finalPrice: finalPrice, + sourceID: ps.sourceID, + detIDs: detIDs, + prices: prices, + } +} + +// Add adds prices of a source from priceSource +// we don't verify the input is DS or NS, it's just handled under the rule restrict by p.deterministic +func (ps *priceSource) Add(psNew *priceSource) (*priceSource, error) { + if ps.sourceID != psNew.sourceID { + return nil, fmt.Errorf("failed to add priceSource, sourceID mismatch, expected:%d, got:%d", ps.sourceID, psNew.sourceID) + } + + if !ps.deterministic { + if len(psNew.prices) == 0 { + return nil, errors.New("failed to add ProtoPriceSource for NS, psNew.prices is empty") + } + + // this is not ds, then just set the final price or overwrite if the input has a later timestamp + if ps.finalPrice == nil || + ps.finalPrice.Timestamp < psNew.prices[0].Timestamp { + ps.finalPrice = psNew.prices[0].PriceResult() + ps.prices = append(ps.prices, psNew.prices[0]) + psNew.prices = psNew.prices[:1] + return ps, nil + } + return nil, errors.New("failed to add ProtoPriceSource for NS, timestamp is old") + } + + var es errorStr + added := false + ret := &priceSource{ + deterministic: ps.deterministic, + sourceID: ps.sourceID, + prices: make([]*PriceInfo, 0), + } + for _, pNew := range psNew.prices { + if _, ok := ps.detIDs[pNew.DetID]; ok { + es.add(fmt.Sprintf("duplicated DetID:%s", pNew.DetID)) + continue + } + added = true + ps.detIDs[pNew.DetID] = struct{}{} + ps.prices = append(ps.prices, pNew) + ret.prices = append(ret.prices, pNew) + } + + if !added { + return nil, fmt.Errorf("failed to add ProtoPriceSource, sourceID:%d, errors:%s", ps.sourceID, es) + } + + sort.SliceStable(ps.prices, func(i, j int) bool { + return ps.prices[i].DetID < ps.prices[j].DetID + }) + return ret, nil +} + +func (p *PricePower) Equals(p2 *PricePower) bool { + if p == nil || p2 == nil { + return p == p2 + } + if !reflect.DeepEqual(p.Price, p2.Price) || p.Power.Cmp(p2.Power) != 0 { + return false + } + if len(p.Validators) != len(p2.Validators) { + return false + } + // safe to range map, map compare + for v := range p.Validators { + if _, ok := p2.Validators[v]; !ok { + return false + } + } + return true +} + +func (p *PricePower) Cpy() *PricePower { + price := *p.Price + validators := make(map[string]struct{}) + // safe to range map, map copy + for v := range p.Validators { + validators[v] = struct{}{} + } + return &PricePower{ + Price: &price, + Power: new(big.Int).Set(p.Power), + Validators: validators, + } +} + +type errorStr string + +func (e *errorStr) add(s string) { + es := string(*e) + *e = errorStr(fmt.Sprintf("%s[%s]", es, s)) +} diff --git a/x/oracle/keeper/feedermanagement/round.go b/x/oracle/keeper/feedermanagement/round.go new file mode 100644 index 000000000..0258a0937 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/round.go @@ -0,0 +1,266 @@ +package feedermanagement + +import ( + "fmt" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" +) + +func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64, cache CacheReader, algo AggAlgorithm) *round { + return &round{ + // #nosec G115 + startBaseBlock: int64(tokenFeeder.StartBaseBlock), + // #nosec G115 + startRoundID: int64(tokenFeeder.StartRoundID), + // #nosec G115 + endBlock: int64(tokenFeeder.EndBlock), + // #nosec G115 + interval: int64(tokenFeeder.Interval), + 
quoteWindowSize: quoteWindowSize, + feederID: feederID, + // #nosec G115 + tokenID: int64(tokenFeeder.TokenID), + cache: cache, + + // default value + status: roundStatusClosed, + a: nil, + roundBaseBlock: 0, + roundID: 0, + algo: algo, + } +} + +func (r *round) Equals(r2 *round) bool { + if r == nil || r2 == nil { + return r == r2 + } + + if r.startBaseBlock != r2.startBaseBlock || + r.startRoundID != r2.startRoundID || + r.endBlock != r2.endBlock || + r.interval != r2.interval || + r.quoteWindowSize != r2.quoteWindowSize || + r.feederID != r2.feederID || + r.tokenID != r2.tokenID || + r.roundBaseBlock != r2.roundBaseBlock || + r.roundID != r2.roundID || + r.status != r2.status { + return false + } + if !r.a.Equals(r2.a) { + return false + } + + return true +} + +func (r *round) CopyForCheckTx() *round { + // flags has been taken care of + ret := *r + // cache does not need to be copied since it's a readonly interface, + // and there's no race condition since abci requests are not executing concurrntly + ret.a = ret.a.CopyForCheckTx() + return &ret +} + +func (r *round) getMsgItemFromProto(msg *oracletypes.MsgItem) (*MsgItem, error) { + power, found := r.cache.GetPowerForValidator(msg.Validator) + if !found { + return nil, fmt.Errorf("failed to get power for validator:%s", msg.Validator) + } + priceSources := make([]*priceSource, 0, len(msg.PSources)) + for _, ps := range msg.PSources { + psNew, err := getPriceSourceFromProto(ps, r.cache) + if err != nil { + return nil, err + } + priceSources = append(priceSources, psNew) + } + return &MsgItem{ + // #nosec G115 + FeederID: int64(msg.FeederID), + Validator: msg.Validator, + Power: power, + PriceSources: priceSources, + }, nil +} + +func (r *round) ValidQuotingBaseBlock(height int64) bool { + return r.IsQuotingWindowOpen() && r.roundBaseBlock == height +} + +// Tally process information to get the final price +// it does not verify if the msg is for the corresponding round(roundid/roundBaseBlock) +// TODO: use valid value instead of the original protoMsg in return +func (r *round) Tally(protoMsg *oracletypes.MsgItem) (*PriceResult, *oracletypes.MsgItem, error) { + if !r.IsQuotingWindowOpen() { + return nil, nil, fmt.Errorf("quoting window is not open, feederID:%d", r.feederID) + } + + msg, err := r.getMsgItemFromProto(protoMsg) + if err != nil { + return nil, nil, fmt.Errorf("failed to get msgItem from proto, error:%w", err) + } + if !r.IsQuoting() { + // record msg for 'handlQuotingMisBehavior' + err := r.a.RecordMsg(msg) + if err == nil { + return nil, protoMsg, oracletypes.ErrQuoteRecorded + } + return nil, nil, fmt.Errorf("failed to record quote for aggregated round, error:%w", err) + } + + err = r.a.AddMsg(msg) + if err != nil { + return nil, nil, fmt.Errorf("failed to add quote for aggregation of feederID:%d, roundID:%d, error:%w", r.feederID, r.roundID, err) + } + + finalPrice, ok := r.FinalPrice() + if ok { + r.status = roundStatusCommittable + // NOTE: for V1, we need return the DetID as well since chainlink is the only source + if r.cache.IsRuleV1(r.feederID) { + detID := r.getFinalDetIDForSourceID(oracletypes.SourceChainlinkID) + finalPrice.DetID = detID + } + return finalPrice, protoMsg, nil + } + + return nil, protoMsg, nil +} + +func (r *round) UpdateParams(tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64) { + // #nosec G115 + r.startBaseBlock = int64(tokenFeeder.StartBaseBlock) + // #nosec G115 + r.endBlock = int64(tokenFeeder.EndBlock) + // #nosec G115 + r.interval = int64(tokenFeeder.Interval) + r.quoteWindowSize 
= quoteWindowSize +} + +// PrepareForNextBlock sets the status to Open and creates a new aggregator on the block before the first block of quoting +func (r *round) PrepareForNextBlock(currentHeight int64) (open bool) { + if currentHeight < r.roundBaseBlock && r.IsQuoting() { + r.closeQuotingWindow() + return open + } + // currentHeight equals baseBlock + if currentHeight == r.roundBaseBlock && !r.IsQuoting() { + r.openQuotingWindow() + open = true + return open + } + baseBlock, roundID, delta, expired := r.getPosition(currentHeight) + + if expired && r.IsQuoting() { + r.closeQuotingWindow() + return open + } + // open a new round + if baseBlock > r.roundBaseBlock { + // move to next round + r.roundBaseBlock = baseBlock + r.roundID = roundID + // the first block in the quoting window + if delta == 0 && !r.IsQuoting() { + r.openQuotingWindow() + open = true + } + } + return open +} + +func (r *round) openQuotingWindow() { + r.status = roundStatusOpen + r.a = newAggregator(r.cache.GetThreshold(), r.algo) +} + +// IsQuotingWindowOpen returns whether the round is inside its current quoting window, which covers the statuses {open, committable, closed} +func (r *round) IsQuotingWindowOpen() bool { + // the aggregator is set when the quoting window opens and removed when the window reaches the end or is force-sealed + return r.a != nil +} + +func (r *round) IsQuotingWindowEnd(currentHeight int64) bool { + _, _, delta, _ := r.getPosition(currentHeight) + return delta == r.quoteWindowSize +} + +func (r *round) IsQuoting() bool { + return r.status == roundStatusOpen +} + +func (r *round) FinalPrice() (*PriceResult, bool) { + if r.a == nil { + return nil, false + } + return r.a.GetFinalPrice() +} + +// closeQuotingWindow sets the round status to roundStatusClosed and removes the current aggregator +func (r *round) closeQuotingWindow() { + r.status = roundStatusClosed + r.a = nil +} + +func (r *round) PerformanceReview(validator string) (miss, malicious bool) { + finalPrice, ok := r.FinalPrice() + if !ok { + return + } + if !r.cache.IsRuleV1(r.feederID) { + // only rulev1 is supported for now + return + } + detID := r.getFinalDetIDForSourceID(oracletypes.SourceChainlinkID) + price := finalPrice.PriceInfo() + price.DetID = detID + prices, ok := r.a.v.GetValidatorQuotePricesForSourceID(validator, oracletypes.SourceChainlinkID) + if !ok { + miss = true + return + } + for _, p := range prices { + if p.EqualDS(price) { + // duplicated detIDs have been filtered out, so if an 'equal' price is found, there will be no 'malicious' price for that detID + return + } + if p.DetID == price.DetID { + malicious = true + return + } + } + miss = true + return +} + +//nolint:unparam +func (r *round) getFinalDetIDForSourceID(sourceID int64) string { + return r.a.ds.GetFinalDetIDForSourceID(sourceID) +} + +func (r *round) Committable() bool { + return r.status == roundStatusCommittable +} + +func (r *round) getPosition(currentHeight int64) (baseBlock, roundID, delta int64, expired bool) { + // endBlock is included + if r.endBlock > 0 && currentHeight > r.endBlock { + expired = true + return + } + if currentHeight < r.startBaseBlock { + return + } + delta = currentHeight - r.startBaseBlock + if r.interval == 0 { + return + } + rounds := delta / r.interval + roundID = r.startRoundID + rounds + delta -= rounds * r.interval + baseBlock = currentHeight - delta + return +} diff --git a/x/oracle/keeper/feedermanagement/types.go b/x/oracle/keeper/feedermanagement/types.go new file mode 100644 index 000000000..99826ff1f --- /dev/null +++ b/x/oracle/keeper/feedermanagement/types.go 
@@ -0,0 +1,286 @@ +package feedermanagement + +import ( + "math/big" + "sort" + + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type Submitter interface { + SetValidatorUpdateForCache(sdk.Context, oracletypes.ValidatorUpdateBlock) + SetParamsForCache(sdk.Context, oracletypes.RecentParams) + SetMsgItemsForCache(sdk.Context, oracletypes.RecentMsg) +} + +type CacheReader interface { + GetPowerForValidator(validator string) (*big.Int, bool) + GetTotalPower() (totalPower *big.Int) + GetValidators() []string + IsRuleV1(feederID int64) bool + IsDeterministic(sourceID int64) (bool, error) + GetThreshold() *threshold +} + +// used to track validator change +type cacheValidator struct { + validators map[string]*big.Int + update bool + totalPower *big.Int +} + +// used to track params change +type cacheParams struct { + params *oracletypes.Params + update bool +} + +type cacheMsgs []*oracletypes.MsgItem + +type caches struct { + k Submitter + + msg *cacheMsgs + validators *cacheValidator + params *cacheParams +} + +type MsgItem struct { + FeederID int64 + Validator string + Power *big.Int + PriceSources []*priceSource +} + +// PriceInfo is the price information including price, decimal, time, and detID +// this is defined as a internal type as alias of oracletypes.PriceTimeDetID +type PriceInfo oracletypes.PriceTimeDetID + +// PricePower wraps PriceInfo with power and validators +// Validators indicates the validators who provided the price +// Power indicates the accumulated power of all validators who provided the price +type PricePower struct { + Price *PriceInfo + Power *big.Int + Validators map[string]struct{} +} + +// PriceResult is the final price information including price, decimal, time, and detID +// this is defined as a internal type as alias of PriceInfo +type PriceResult PriceInfo + +// PriceSource describes a specific source of related prices +// deteministic indicates whether the source is deterministic +// finalPrice indicates the final price of the source aggregated from all prices +// sourceID indicates the source ID +// detIDs indicates the detIDs of all prices, detID means the unique identifier of a price defined in the deterministic itself +// prices indicates all prices of the source, it's defined in slice instead of map to keep the order +type priceSource struct { + deterministic bool + finalPrice *PriceResult + sourceID int64 + detIDs map[string]struct{} + // ordered by detID + prices []*PriceInfo +} + +// priceValiadtor describes a specific validator of related priceSources(each source could has multiple prices) +// finalPrice indicates the final price of the validator aggregated from all prices of each source +// validator indicates the validator address +// power indicates the power of the validator +// priceSources indicates all priceSources of the validator, the map key is sourceID +type priceValidator struct { + finalPrice *PriceResult + validator string + power *big.Int + // each source will get a single final price independently, the order of sources does not matter, map is safe + priceSources map[int64]*priceSource +} + +// recordsValidators is the price records for all validators, the record item is priceValidator +// finalPrice indicates the final price of all validators aggregated from all prices of each validator +// finalPrices indicates the final price of each validator aggregated from all of its prices of each source +// 
accumulatedPower indicates the accumulated power of all validators +// records indicates all priceValidators, the map key is validator address +type recordsValidators struct { + finalPrice *PriceResult + finalPrices map[string]*PriceResult + // TODO: V2: accumulatedValidPower only includes validators who providing all sources required by rules(defined in oracle.Params) + // accumulatedValidVpower: map[string]*big.Int + accumulatedPower *big.Int + // each validator will get a single final price independently, the order of validators does not matter, map is safe + records map[string]*priceValidator +} + +// recordsDS is the price records of a deterministic source +// finalPrice indicates the final price of a specific detID chosen out of all prices with different detIDs which pass the threshold +// finalDetID indicates the final detID chosen out of all detIDs +// accumulatedPowers indicates the accumulated power of all validators who provided the price for this source +// valiators indicates all validators who provided the price for this source +// records indicates all PricePower of this source provided by different validators, the slice is ordered by detID +type recordsDS struct { + finalPrice *PriceResult + finalDetID string + accumulatedPowers *big.Int + validators map[string]struct{} + // ordered by detID + records []*PricePower +} + +// each source will get a final price independently, the order of sources does not matter, map is safe +// recordsDSs is the price records for all deterministic sources +// threshold indicates the threshold defined to decide final price for each source +type recordsDSs struct { + t *threshold + dsMap map[int64]*recordsDS +} + +// threshold is defined as (thresholdA * thresholdB) / totalPower +// when do compare with power, it should be (thresholdB * power) > (thresholdA * totalPower) to avoid decimal calculation +type threshold struct { + totalPower *big.Int + thresholdA *big.Int + thresholdB *big.Int +} + +func (t *threshold) Equals(t2 *threshold) bool { + if t == nil || t2 == nil { + return t == t2 + } + return t.totalPower.Cmp(t2.totalPower) == 0 && t.thresholdA.Cmp(t2.thresholdA) == 0 && t.thresholdB.Cmp(t2.thresholdB) == 0 +} + +func (t *threshold) Cpy() *threshold { + return &threshold{ + totalPower: new(big.Int).Set(t.totalPower), + thresholdA: new(big.Int).Set(t.thresholdA), + thresholdB: new(big.Int).Set(t.thresholdB), + } +} + +func (t *threshold) Exceeds(power *big.Int) bool { + return new(big.Int).Mul(t.thresholdB, power).Cmp(new(big.Int).Mul(t.thresholdA, t.totalPower)) > 0 +} + +// aggregator is the price aggregator for a specific round +// t is the threshold definition for price consensus for the round +// finalPrice indicates the final price of the round +// v is the price records for all validators with prices they provided +// ds is the price records for all deterministic sources which could have prices with multiple detIDs provided by validators +// algo is the aggregation algorithm for the round, currently we use 'Median' as default +type aggregator struct { + t *threshold + finalPrice *PriceResult + v *recordsValidators + ds *recordsDSs + algo AggAlgorithm +} + +// roundStatus indicates the status of a round +type roundStatus int32 + +const ( + // define closed as default value 0 + // close: the round is closed for price submission and no valid price to commit + roundStatusClosed roundStatus = iota + // open: the round is open for price submission + roundStatusOpen + // committable: the round is closed for price submission and available 
for price to commit + roundStatusCommittable +) + +// round is the price round for a specific tokenFeeder corresponding to the price feed progress of a specific token +type round struct { + // startBaseBlock is the start block height of corresponding tokenFeeder + startBaseBlock int64 + // startRoundID is the round ID of corresponding tokenFeeder + startRoundID int64 + // endBlock is the end block height of the corresponding tokenFeeder + endBlock int64 + // interval is the interval of the corresponding tokenFeeder + interval int64 + // quoteWindowSize is the quote window size of the corresponding tokenFeeder + quoteWindowSize int64 + + // feederID is the feeder ID of the corresponding tokenFeeder + feederID int64 + // tokenID is the token ID of the corresponding tokenFeeder + tokenID int64 + + // roundBaseBlock is the round base block of current round + roundBaseBlock int64 + // roundID is the round ID of current round + roundID int64 + // status indicates the status of current round + status roundStatus + // aggregator is the price aggregator for current round + a *aggregator + // cache is the cache reader for current round to provide params, validators information + cache CacheReader + // algo is the aggregation algorithm for current round to get final price + algo AggAlgorithm +} + +type orderedSliceInt64 []int64 + +func (osi orderedSliceInt64) Equals(o2 orderedSliceInt64) bool { + if len(osi) == 0 || len(o2) == 0 { + return len(osi) == len(o2) + } + + for idx, v := range osi { + if v != (o2)[idx] { + return false + } + } + return true +} + +func (osi *orderedSliceInt64) add(i int64) { + result := append(*osi, i) + sort.Slice(result, func(i, j int) bool { + return result[i] < result[j] + }) + *osi = result +} + +func (osi *orderedSliceInt64) remove(i int64) { + for idx, v := range *osi { + if v == i { + *osi = append((*osi)[:idx], (*osi)[idx+1:]...) + return + } + } +} + +func (osi *orderedSliceInt64) sort() { + sort.Slice(*osi, func(i, j int) bool { + return (*osi)[i] < (*osi)[j] + }) +} + +// FeederManager is the manager for the price feed progress of all token feeders +type FeederManager struct { + // fCheckTx is a copy of FeederManager used for mode of checkTx to simulate transactions + fCheckTx *FeederManager + // k is the oracle keeper + k common.KeeperOracle + // sortedFeederIDs is the ordered feeder IDs corresponding to all the rounds included in FeederManager + sortedFeederIDs orderedSliceInt64 + // rounds is the map of all rounds included in FeederManager, the key is the feeder ID + // TODO: change type of key from int64 to uint64 + rounds map[int64]*round + cs *caches + // paramsUpdated indicates whether the params are updated in current block + paramsUpdated bool + // validatorsUpdated indicates whether the validators are updated in current block + validatorsUpdated bool + // forceSeal indicates whether it's satisfied to force seal all the rounds + // when the validators are updated in current block or some related params are updated, this will be set to true. + forceSeal bool + // restSlashing indicates whether it's satisfied to reset slashing + // when the slashing params is changed in current block, this will be set to true. 
+ resetSlashing bool +} diff --git a/x/oracle/keeper/keeper.go b/x/oracle/keeper/keeper.go index 6738b8b37..b0710c124 100644 --- a/x/oracle/keeper/keeper.go +++ b/x/oracle/keeper/keeper.go @@ -11,20 +11,12 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/aggregator" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement" "github.com/ExocoreNetwork/exocore/x/oracle/types" ) type ( - memoryStore struct { - cs *cache.Cache - agc *aggregator.AggregatorContext - agcCheckTx *aggregator.AggregatorContext - updatedFeederIDs []string - } - Keeper struct { cdc codec.BinaryCodec storeKey storetypes.StoreKey @@ -35,8 +27,7 @@ type ( delegationKeeper types.DelegationKeeper assetsKeeper types.AssetsKeeper types.SlashingKeeper - // wrap all four memory cache into one pointer to track them among cpoies of Keeper (msgServer, module) - memStore *memoryStore + *feedermanagement.FeederManager } ) @@ -62,7 +53,7 @@ func NewKeeper( ps = ps.WithKeyTable(types.ParamKeyTable()) } - return Keeper{ + ret := Keeper{ cdc: cdc, storeKey: storeKey, memKey: memKey, @@ -72,8 +63,11 @@ func NewKeeper( assetsKeeper: assetsKeeper, authority: authority, SlashingKeeper: slashingKeeper, - memStore: new(memoryStore), + // fm: feedermanagement.NewFeederManager(nil), + FeederManager: feedermanagement.NewFeederManager(nil), } + ret.SetKeeper(ret) + return ret } func (k Keeper) Logger(ctx sdk.Context) log.Logger { diff --git a/x/oracle/keeper/keeper_suite_test.go b/x/oracle/keeper/keeper_suite_test.go index 9ce806875..f7ecffd88 100644 --- a/x/oracle/keeper/keeper_suite_test.go +++ b/x/oracle/keeper/keeper_suite_test.go @@ -7,6 +7,7 @@ import ( math "cosmossdk.io/math" "github.com/ExocoreNetwork/exocore/testutil" "github.com/ExocoreNetwork/exocore/x/oracle/keeper" + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/testdata" "github.com/ExocoreNetwork/exocore/x/oracle/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/suite" @@ -67,19 +68,18 @@ func TestKeeper(t *testing.T) { suite.Run(t, ks) - resetSingle(ks.App.OracleKeeper) RegisterFailHandler(Fail) RunSpecs(t, "Keeper Suite") } func (suite *KeeperSuite) Reset() { - p4Test := DefaultParamsForTest() + p4Test := testdata.DefaultParamsForTest() p4Test.TokenFeeders[1].StartBaseBlock = 1 suite.k.SetParams(suite.ctx, p4Test) + suite.k.FeederManager.SetNilCaches() + suite.k.FeederManager.BeginBlock(suite.ctx) suite.ctx = suite.ctx.WithBlockHeight(12) - suite.ctrl = gomock.NewController(suite.t) - resetSingle(suite.App.OracleKeeper) } func (suite *KeeperSuite) SetupTest() { @@ -101,19 +101,14 @@ func (suite *KeeperSuite) SetupTest() { validators := suite.ValSet.Validators suite.valAddr1, _ = sdk.ValAddressFromBech32(sdk.ValAddress(validators[0].Address).String()) suite.valAddr2, _ = sdk.ValAddressFromBech32(sdk.ValAddress(validators[1].Address).String()) - resetSingle(suite.App.OracleKeeper) suite.k = suite.App.OracleKeeper suite.ms = keeper.NewMsgServerImpl(suite.App.OracleKeeper) suite.ctx = suite.Ctx // Initialize params - p4Test := DefaultParamsForTest() + p4Test := testdata.DefaultParamsForTest() p4Test.TokenFeeders[1].StartBaseBlock = 1 suite.k.SetParams(suite.ctx, p4Test) suite.ctx = suite.ctx.WithBlockHeight(12) -} - -func resetSingle(k keeper.Keeper) { - k.ResetAggregatorContext() - k.ResetCache() + 
suite.k.FeederManager.BeginBlock(suite.ctx) } diff --git a/x/oracle/keeper/msg_server_create_price.go b/x/oracle/keeper/msg_server_create_price.go index a2c903abb..d2ab78679 100644 --- a/x/oracle/keeper/msg_server_create_price.go +++ b/x/oracle/keeper/msg_server_create_price.go @@ -2,17 +2,21 @@ package keeper import ( "context" - "errors" + "crypto/sha256" + "encoding/base64" "strconv" + "strings" "time" + sdkerrors "cosmossdk.io/errors" "github.com/ExocoreNetwork/exocore/x/oracle/types" sdk "github.com/cosmos/cosmos-sdk/types" ) const ( layout = "2006-01-02 15:04:05" - maxFutureOffset = 5 * time.Second + maxFutureOffset = 30 * time.Second + maxPriceLength = 32 ) // CreatePrice proposes price for new round of specific tokenFeeder @@ -24,78 +28,65 @@ func (ms msgServer) CreatePrice(goCtx context.Context, msg *types.MsgCreatePrice defer func() { ctx = ctx.WithGasMeter(gasMeter) }() - logger := ms.Keeper.Logger(ctx) + + logger := ms.Logger(ctx) + + validator, _ := types.ConsAddrStrFromCreator(msg.Creator) + logQuote := []interface{}{"feederID", msg.FeederID, "baseBlock", msg.BasedBlock, "proposer", validator, "msg-nonce", msg.Nonce, "height", ctx.BlockHeight()} + if err := checkTimestamp(ctx, msg); err != nil { - logger.Info("price proposal timestamp check failed", "error", err, "height", ctx.BlockHeight()) + logger.Error("quote has invalid timestamp", append(logQuote, "error", err)...) return nil, types.ErrPriceProposalFormatInvalid.Wrap(err.Error()) } - agc := ms.Keeper.GetAggregatorContext(ctx) - newItem, caches, err := agc.NewCreatePrice(ctx, msg) + // core logic and functionality of Price Aggregation + finalPrice, err := ms.ProcessQuote(ctx, msg, ctx.IsCheckTx()) if err != nil { - logger.Info("price proposal failed", "error", err, "height", ctx.BlockHeight(), "feederID", msg.FeederID) + if sdkerrors.IsOf(err, types.ErrQuoteRecorded) { + // quote is recorded only, this happens when a quoting-window is not availalbe before that window end due to final price aggregated successfully in advance + // we will still record this msg if it's valid + logger.Info("recorded quote for oracle-behavior evaluation", append(logQuote, "msg", msg)...) + return &types.MsgCreatePriceResponse{}, nil + } + logger.Error("failed to process quote", append(logQuote, "error", err)...) return nil, err } - logger.Info("add price proposal for aggregation", "feederID", msg.FeederID, "basedBlock", msg.BasedBlock, "proposer", msg.Creator, "height", ctx.BlockHeight()) - + logger.Info("added quote for aggregation", append(logQuote, "msg", msg)...) 
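The aggregation that ProcessQuote drives ultimately hinges on the threshold comparison defined in types.go above: consensus is reached only when thresholdB * power > thresholdA * totalPower, which is the same as requiring power to exceed thresholdA/thresholdB of the total power while staying in integer arithmetic. A small standalone sketch of that check (the helper below mirrors threshold.Exceeds; with the ThresholdA=2, ThresholdB=3 defaults used by the test params this is the usual 2/3 rule):

package main

import (
	"fmt"
	"math/big"
)

// exceedsThreshold mirrors the comparison in threshold.Exceeds: it reports whether
// power > (thresholdA/thresholdB) * totalPower using only big.Int multiplication.
func exceedsThreshold(totalPower, thresholdA, thresholdB, power *big.Int) bool {
	lhs := new(big.Int).Mul(thresholdB, power)      // thresholdB * power
	rhs := new(big.Int).Mul(thresholdA, totalPower) // thresholdA * totalPower
	return lhs.Cmp(rhs) > 0
}

func main() {
	total, a, b := big.NewInt(4), big.NewInt(2), big.NewInt(3)
	fmt.Println(exceedsThreshold(total, a, b, big.NewInt(2))) // false: 3*2=6 <= 2*4=8
	fmt.Println(exceedsThreshold(total, a, b, big.NewInt(3))) // true:  3*3=9 >  2*4=8
}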
+ // TODO: use another type ctx.EventManager().EmitEvent(sdk.NewEvent( types.EventTypeCreatePrice, sdk.NewAttribute(types.AttributeKeyFeederID, strconv.FormatUint(msg.FeederID, 10)), sdk.NewAttribute(types.AttributeKeyBasedBlock, strconv.FormatUint(msg.BasedBlock, 10)), - sdk.NewAttribute(types.AttributeKeyProposer, msg.Creator), - ), - ) + sdk.NewAttribute(types.AttributeKeyProposer, validator), + )) - if caches == nil { - return &types.MsgCreatePriceResponse{}, nil - } - if newItem != nil { - if success := ms.AppendPriceTR(ctx, newItem.TokenID, newItem.PriceTR); !success { - // This case should not exist, keep this line to avoid consensus fail if this happens - prevPrice, nextRoundID := ms.GrowRoundID(ctx, newItem.TokenID) - logger.Error("append new price round fail for mismatch roundID, and will just grow roundID with previous price", "roundID from finalPrice", newItem.PriceTR.RoundID, "expect nextRoundID", nextRoundID, "prevPrice", prevPrice) - } else { - logger.Info("final price aggregation done", "feederID", msg.FeederID, "roundID", newItem.PriceTR.RoundID, "price", newItem.PriceTR.Price) + if finalPrice != nil { + logger.Info("final price successfully aggregated", "price", finalPrice, "feederID", msg.FeederID, "height", ctx.BlockHeight()) + decimalStr := strconv.FormatInt(int64(finalPrice.Decimal), 10) + // #nosec G115 + tokenID, _ := ms.GetTokenIDForFeederID(int64(msg.FeederID)) + tokenIDStr := strconv.FormatInt(tokenID, 10) + roundIDStr := strconv.FormatUint(finalPrice.RoundID, 10) + priceStr := finalPrice.Price + + // if price is too long, hash it + // this is to prevent the price from being too long and causing the event to be too long + // price is also used for 'nst' to describe the balance change, and it will be at least 32 bytes at that case + if len(priceStr) >= maxPriceLength { + hash := sha256.New() + hash.Write([]byte(priceStr)) + priceStr = base64.StdEncoding.EncodeToString(hash.Sum(nil)) } - decimalStr := strconv.FormatInt(int64(newItem.PriceTR.Decimal), 10) - tokenIDStr := strconv.FormatUint(newItem.TokenID, 10) - roundIDStr := strconv.FormatUint(newItem.PriceTR.RoundID, 10) + // emit event to tell price is updated for current round of corresponding feederID ctx.EventManager().EmitEvent(sdk.NewEvent( types.EventTypeCreatePrice, sdk.NewAttribute(types.AttributeKeyRoundID, roundIDStr), - sdk.NewAttribute(types.AttributeKeyFinalPrice, tokenIDStr+"_"+roundIDStr+"_"+newItem.PriceTR.Price+"_"+decimalStr), + sdk.NewAttribute(types.AttributeKeyFinalPrice, strings.Join([]string{tokenIDStr, roundIDStr, priceStr, decimalStr}, "_")), sdk.NewAttribute(types.AttributeKeyPriceUpdated, types.AttributeValuePriceUpdatedSuccess)), ) - if !ctx.IsCheckTx() { - ms.Keeper.GetCaches().RemoveCache(caches) - ms.Keeper.AppendUpdatedFeederIDs(msg.FeederID) - } - } else if !ctx.IsCheckTx() { - ms.Keeper.GetCaches().AddCache(caches) } return &types.MsgCreatePriceResponse{}, nil } - -func checkTimestamp(goCtx context.Context, msg *types.MsgCreatePrice) error { - ctx := sdk.UnwrapSDKContext(goCtx) - now := ctx.BlockTime().UTC() - for _, ps := range msg.Prices { - for _, price := range ps.Prices { - ts := price.Timestamp - if len(ts) == 0 { - return errors.New("timestamp should not be empty") - } - t, err := time.ParseInLocation(layout, ts, time.UTC) - if err != nil { - return errors.New("timestamp format invalid") - } - if now.Add(maxFutureOffset).Before(t) { - return errors.New("timestamp is in the future") - } - } - } - return nil -} diff --git a/x/oracle/keeper/msg_server_create_price_test.go 
b/x/oracle/keeper/msg_server_create_price_test.go deleted file mode 100644 index b2f64793c..000000000 --- a/x/oracle/keeper/msg_server_create_price_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package keeper_test - -import ( - reflect "reflect" - - math "cosmossdk.io/math" - dogfoodkeeper "github.com/ExocoreNetwork/exocore/x/dogfood/keeper" - dogfoodtypes "github.com/ExocoreNetwork/exocore/x/dogfood/types" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/testdata" - "github.com/ExocoreNetwork/exocore/x/oracle/types" - . "github.com/agiledragon/gomonkey/v2" - sdk "github.com/cosmos/cosmos-sdk/types" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -//go:generate mockgen -destination mock_validator_test.go -package keeper_test github.com/cosmos/cosmos-sdk/x/staking/types ValidatorI - -var _ = Describe("MsgCreatePrice", func() { - var c *cache.Cache - var p *Patches - BeforeEach(func() { - ks.Reset() - Expect(ks.ms).ToNot(BeNil()) - - // TODO: remove monkey patch for test - p = ApplyMethod(reflect.TypeOf(dogfoodkeeper.Keeper{}), "GetLastTotalPower", func(k dogfoodkeeper.Keeper, ctx sdk.Context) math.Int { return math.NewInt(3) }) - p.ApplyMethod(reflect.TypeOf(dogfoodkeeper.Keeper{}), "GetAllExocoreValidators", func(k dogfoodkeeper.Keeper, ctx sdk.Context) []dogfoodtypes.ExocoreValidator { - return []dogfoodtypes.ExocoreValidator{ - { - Address: ks.mockValAddr1, - Power: 1, - }, - { - Address: ks.mockValAddr2, - Power: 1, - }, - { - Address: ks.mockValAddr3, - Power: 1, - }, - } - }) - - Expect(ks.ctx.BlockHeight()).To(Equal(int64(12))) - }) - - AfterEach(func() { - ks.ctrl.Finish() - if p != nil { - p.Reset() - } - }) - - Context("3 validators with 1 voting power each", func() { - BeforeEach(func() { - ks.ms.CreatePrice(ks.ctx, &types.MsgCreatePrice{ - Creator: ks.mockConsAddr1.String(), - FeederID: 1, - Prices: testdata.PS1, - BasedBlock: 11, - Nonce: 1, - }) - - c = ks.App.OracleKeeper.GetCaches() - // c = ks.ms.Keeper.GetCaches() - var pRes cache.ItemP - c.GetCache(&pRes) - p4Test := DefaultParamsForTest() - p4Test.TokenFeeders[1].StartBaseBlock = 1 - Expect(pRes).Should(BeEquivalentTo(p4Test)) - }) - - It("success on 3rd message", func() { - iRes := make([]*cache.ItemM, 0) - c.GetCache(&iRes) - Expect(iRes[0].Validator).Should(Equal(ks.mockConsAddr1.String())) - - ks.ms.CreatePrice(ks.ctx, &types.MsgCreatePrice{ - Creator: ks.mockConsAddr2.String(), - FeederID: 1, - Prices: testdata.PS2, - BasedBlock: 11, - Nonce: 1, - }, - ) - ks.ms.CreatePrice(ks.ctx, &types.MsgCreatePrice{}) - c.GetCache(&iRes) - Expect(len(iRes)).Should(Equal(2)) - - ks.ms.CreatePrice(ks.ctx, &types.MsgCreatePrice{ - Creator: ks.mockConsAddr3.String(), - FeederID: 1, - Prices: testdata.PS4, - BasedBlock: 11, - Nonce: 1, - }, - ) - c.GetCache(&iRes) - Expect(len(iRes)).Should(Equal(0)) - prices := ks.k.GetAllPrices(sdk.UnwrapSDKContext(ks.ctx)) - Expect(prices[0]).Should(BeEquivalentTo(types.Prices{ - TokenID: 1, - NextRoundID: 3, - PriceList: []*types.PriceTimeRound{ - { - Price: "1", - Decimal: 0, - Timestamp: "", - RoundID: 1, - }, - { - Price: testdata.PTD2.Price, - Decimal: testdata.PTD2.Decimal, - Timestamp: prices[0].PriceList[1].Timestamp, - RoundID: 2, - }, - }, - })) - }) - }) -}) diff --git a/x/oracle/keeper/msg_server_update_params.go b/x/oracle/keeper/msg_server_update_params.go index 87f35f2c4..3a8051303 100644 --- a/x/oracle/keeper/msg_server_update_params.go +++ b/x/oracle/keeper/msg_server_update_params.go @@ -4,7 +4,6 @@ 
import ( "context" utils "github.com/ExocoreNetwork/exocore/utils" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" "github.com/ExocoreNetwork/exocore/x/oracle/types" sdk "github.com/cosmos/cosmos-sdk/types" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" @@ -66,7 +65,8 @@ func (ms msgServer) UpdateParams(goCtx context.Context, msg *types.MsgUpdatePara } // set updated new params ms.SetParams(ctx, p) - _ = ms.Keeper.GetAggregatorContext(ctx) - ms.Keeper.GetCaches().AddCache(cache.ItemP(p)) + if !ctx.IsCheckTx() { + ms.SetParamsUpdated() + } return &types.MsgUpdateParamsResponse{}, nil } diff --git a/x/oracle/keeper/msg_server_update_params_test.go b/x/oracle/keeper/msg_server_update_params_test.go index c893c4a62..d2087961a 100644 --- a/x/oracle/keeper/msg_server_update_params_test.go +++ b/x/oracle/keeper/msg_server_update_params_test.go @@ -2,11 +2,10 @@ package keeper_test import ( reflect "reflect" - "time" - sdkmath "cosmossdk.io/math" dogfoodkeeper "github.com/ExocoreNetwork/exocore/x/dogfood/keeper" dogfoodtypes "github.com/ExocoreNetwork/exocore/x/dogfood/types" + testdata "github.com/ExocoreNetwork/exocore/x/oracle/keeper/testdata" "github.com/ExocoreNetwork/exocore/x/oracle/types" . "github.com/agiledragon/gomonkey/v2" "github.com/cosmos/cosmos-sdk/testutil/mock" @@ -15,75 +14,6 @@ import ( . "github.com/onsi/gomega" ) -func DefaultParamsForTest() types.Params { - return types.Params{ - Chains: []*types.Chain{ - {Name: "-", Desc: "-"}, - {Name: "Ethereum", Desc: "-"}, - }, - Tokens: []*types.Token{ - {}, - { - Name: "ETH", - ChainID: 1, - ContractAddress: "0x", - Decimal: 18, - Active: true, - AssetID: "0x0b34c4d876cd569129cf56bafabb3f9e97a4ff42_0x9ce1", - }, - }, - // source defines where to fetch the prices - Sources: []*types.Source{ - { - Name: "0 position is reserved", - }, - { - Name: "Chainlink", - Entry: &types.Endpoint{ - Offchain: map[uint64]string{0: ""}, - }, - Valid: true, - Deterministic: true, - }, - }, - // rules defines price from which sources are accepted, could be used to proof malicious - Rules: []*types.RuleSource{ - // 0 is reserved - {}, - { - // all sources math - SourceIDs: []uint64{0}, - }, - }, - // TokenFeeder describes when a token start to be updated with its price, and the frequency, endTime. 
- TokenFeeders: []*types.TokenFeeder{ - {}, - { - TokenID: 1, - RuleID: 1, - StartRoundID: 1, - StartBaseBlock: 1000000, - Interval: 10, - }, - }, - MaxNonce: 3, - ThresholdA: 2, - ThresholdB: 3, - // V1 set mode to 1 - Mode: types.ConsensusModeASAP, - MaxDetId: 5, - MaxSizePrices: 100, - Slashing: &types.SlashingParams{ - ReportedRoundsWindow: 100, - MinReportedPerWindow: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(2)), - OracleMissJailDuration: 600 * time.Second, - OracleMaliciousJailDuration: 30 * 24 * time.Hour, - SlashFractionMiss: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(20)), - SlashFractionMalicious: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(10)), - }, - } -} - var _ = Describe("MsgUpdateParams", Ordered, func() { var defaultParams types.Params var patcher *Patches @@ -95,7 +25,7 @@ var _ = Describe("MsgUpdateParams", Ordered, func() { BeforeEach(func() { ks.Reset() Expect(ks.ms).ToNot(BeNil()) - defaultParams = DefaultParamsForTest() + defaultParams = testdata.DefaultParamsForTest() ks.k.SetParams(ks.ctx, defaultParams) privVal1 := mock.NewPV() @@ -183,7 +113,7 @@ var _ = Describe("MsgUpdateParams", Ordered, func() { types.ErrInvalidParams.Wrap("invalid token to add, chain not found"), types.ErrInvalidParams.Wrap("invalid token to add, chain not found"), } - token := DefaultParamsForTest().Tokens[1] + token := testdata.DefaultParamsForTest().Tokens[1] token1 := *token token1.Decimal = 8 diff --git a/x/oracle/keeper/native_token.go b/x/oracle/keeper/native_token.go index 63bbcb725..86ae35b16 100644 --- a/x/oracle/keeper/native_token.go +++ b/x/oracle/keeper/native_token.go @@ -3,6 +3,7 @@ package keeper import ( "errors" "fmt" + "strconv" "strings" sdkmath "cosmossdk.io/math" @@ -26,10 +27,15 @@ import ( type NSTAssetID string const ( - NSTETHAssetAddr = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + // NSTETHAssetAddr = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" // TODO: we currently support NSTETH only which has capped effective balance for one validator // TODO: this is a bad practice, and for Lz, they have different version of endpoint with different chainID // Do the validation before invoke oracle related functions instead of check these hard code ids here. 
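The chain-ID constants that follow pin down which deployments NST ETH is currently recognized on; every NST asset ID is the virtual ETH address joined to one of those hex chain IDs with an underscore, and UpdateNSTByBalanceChange rejects unsupported IDs through IsLimitedChangeNST. A trivial illustration of the ID shape (the variable names here are made up for the sketch):

// Illustrative only: "<virtual ETH address>_<hex chain ID>", the same shape as the
// NSTETHAssetID* constants defined just below.
nstETHAddr := "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
sepoliaNSTAssetID := nstETHAddr + "_" + "0x9ce1" // 0x9ce1 is ETHSepoliaChainID below
_ = sepoliaNSTAssetID
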
+ ETHMainnetChainID = "0x7595" + ETHLocalnetChainID = "0x65" + ETHHoleskyChainID = "0x9d19" + ETHSepoliaChainID = "0x9ce1" + NSTETHAssetIDMainnet NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x7595" NSTETHAssetIDLocalnet NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x65" NSTETHAssetIDHolesky NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x9d19" @@ -111,7 +117,9 @@ func (k Keeper) GetAllStakerInfosAssets(ctx sdk.Context) (ret []types.StakerInfo for ; iterator.Valid(); iterator.Next() { assetID, _ := types.ParseNativeTokenStakerKey(iterator.Key()) if l == 0 || ret[l-1].AssetId != assetID { + version := k.GetNSTVersion(ctx, assetID) ret = append(ret, types.StakerInfosAssets{ + NstVersion: version, AssetId: assetID, StakerInfos: make([]*types.StakerInfo, 0), }) @@ -156,9 +164,11 @@ func (k Keeper) GetAllStakerListAssets(ctx sdk.Context) (ret []types.StakerListA for ; iterator.Valid(); iterator.Next() { v := &types.StakerList{} k.cdc.MustUnmarshal(iterator.Value(), v) + version := k.GetNSTVersion(ctx, string(iterator.Key())) ret = append(ret, types.StakerListAssets{ AssetId: string(iterator.Key()), StakerList: v, + NstVersion: version, }) } return ret @@ -199,6 +209,7 @@ func (k Keeper) UpdateNSTValidatorListForStaker(ctx sdk.Context, assetID, staker newBalance = *(stakerInfo.BalanceList[latestIndex]) newBalance.Index++ } + // #nosec G115 newBalance.Block = uint64(ctx.BlockHeight()) if amountInt64 > 0 { newBalance.Change = types.Action_ACTION_DEPOSIT @@ -256,27 +267,35 @@ func (k Keeper) UpdateNSTValidatorListForStaker(ctx sdk.Context, assetID, staker store.Set(key, bz) } + // valid veriosn start from 1 + version := k.IncreaseNSTVersion(ctx, assetID) // we use index to sync with client about status of stakerInfo.ValidatorPubkeyList - eventValue := fmt.Sprintf("%d_%s_%d", stakerInfo.StakerIndex, validatorPubkey, newBalance.Index) + eventValue := fmt.Sprintf("%d_%s_%d", stakerInfo.StakerIndex, validatorPubkey, version) if newBalance.Change == types.Action_ACTION_DEPOSIT { eventValue = fmt.Sprintf("%s_%s", types.AttributeValueNativeTokenDeposit, eventValue) } else { eventValue = fmt.Sprintf("%s_%s", types.AttributeValueNativeTokenWithdraw, eventValue) } - // emit an event to tell a new valdiator added/or a validator is removed for the staker + // emit an event to tell the details that a new valdiator added/or a validator is removed for the staker + // deposit_stakerID_validatorKey ctx.EventManager().EmitEvent(sdk.NewEvent( types.EventTypeCreatePrice, sdk.NewAttribute(types.AttributeKeyNativeTokenChange, eventValue), )) + return nil } // UpdateNSTByBalanceChange updates balance info for staker under native-restaking asset of assetID when its balance changed by slash/refund on the source chain (beacon chain for eth) -func (k Keeper) UpdateNSTByBalanceChange(ctx sdk.Context, assetID string, rawData []byte, roundID uint64) error { +func (k Keeper) UpdateNSTByBalanceChange(ctx sdk.Context, assetID string, price types.PriceTimeRound, version int64) error { if !IsLimitedChangeNST(assetID) { return types.ErrNSTAssetNotSupported } + if version != k.GetNSTVersion(ctx, assetID) { + return errors.New("version not match") + } _, chainID, _ := assetstypes.ParseID(assetID) + rawData := []byte(price.Price) if len(rawData) < 32 { return errors.New("length of indicate maps for stakers should be exactly 32 bytes") } @@ -304,13 +323,10 @@ func (k Keeper) UpdateNSTByBalanceChange(ctx sdk.Context, assetID string, rawDat newBalance = *(stakerInfo.BalanceList[length-1]) } 
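UpdateNSTByBalanceChange now takes the reported version and refuses to apply a balance change when it does not match the stored counter, so reports computed against a stale validator list are dropped. A short sketch of the versioning contract as wired up in this diff (IncreaseNSTVersion and GetNSTVersion are defined a little further down in this file; that the off-chain feeder embeds the version into the deterministic ID is an assumption drawn from the getNSTVersionFromDetID helper added below):

// 1. every validator-list change bumps a per-asset counter stored as a big-endian uint64:
version := k.IncreaseNSTVersion(ctx, assetID) // 1, 2, 3, ... per assetID; GetNSTVersion returns 0 before the first bump
// 2. reporters are expected to echo that version inside the deterministic ID, "<detID>_<version>",
//    which getNSTVersionFromDetID (added further down) recovers, e.g. "3_7" -> 7.
// 3. AppendPriceTR passes the recovered version into UpdateNSTByBalanceChange, which only
//    applies the change when it equals GetNSTVersion(ctx, assetID).
_ = version
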
newBalance.Block = uint64(ctx.BlockHeight()) - if newBalance.RoundID == roundID { - newBalance.Index++ - } else { - newBalance.RoundID = roundID - newBalance.Index = 0 - } + // we set index as a global reference used through all rounds + newBalance.Index++ newBalance.Change = types.Action_ACTION_SLASH_REFUND + newBalance.RoundID = price.RoundID // balance update are based on initial/max effective balance: 32 maxBalance := maxEffectiveBalance(assetID) * (len(stakerInfo.ValidatorPubkeyList)) balance := maxBalance + change @@ -342,6 +358,43 @@ func (k Keeper) UpdateNSTByBalanceChange(ctx sdk.Context, assetID string, rawDat return nil } +// IncreaseNSTVersion increases the version of native token for assetID +func (k Keeper) IncreaseNSTVersion(ctx sdk.Context, assetID string) int64 { + store := ctx.KVStore(k.storeKey) + key := types.NativeTokenVersionKey(assetID) + value := store.Get(key) + if value == nil { + // set the first index of version to 1 + store.Set(key, sdk.Uint64ToBigEndian(1)) + return 1 + } + version := sdk.BigEndianToUint64(value) + 1 + store.Set(key, sdk.Uint64ToBigEndian(version)) + // #nosec G115 + // TODO: use uint64 for version, the price-feeder may need corresponding change + return int64(version) +} + +// IncreaseNSTVersion increases the version of native token for assetID +func (k Keeper) SetNSTVersion(ctx sdk.Context, assetID string, version int64) int64 { + store := ctx.KVStore(k.storeKey) + key := types.NativeTokenVersionKey(assetID) + // #nosec version is not negative + store.Set(key, sdk.Uint64ToBigEndian(uint64(version))) + return version +} + +func (k Keeper) GetNSTVersion(ctx sdk.Context, assetID string) int64 { + store := ctx.KVStore(k.storeKey) + key := types.NativeTokenVersionKey(assetID) + value := store.Get(key) + if value == nil { + return 0 + } + // #nosec G115 + return int64(sdk.BigEndianToUint64(value)) +} + func (k Keeper) getDecimal(ctx sdk.Context, assetID string) (int, sdkmath.Int, error) { decimalMap, err := k.assetsKeeper.GetAssetsDecimal(ctx, map[string]interface{}{assetID: nil}) if err != nil { @@ -351,78 +404,6 @@ func (k Keeper) getDecimal(ctx sdk.Context, assetID string) (int, sdkmath.Int, e return int(decimal), sdkmath.NewIntWithDecimal(1, int(decimal)), nil } -// TODO: This conversion has limited length for balance change, it suites for beaconchain currently, If we extend to other changes, this method need to be upgrade -// for value that might be too big leading too long length of the change value, many related changes need to be done since the message size might be too big then -// parseBalanceChange parses rawData to details of amount change for all stakers relative to native restaking -func parseBalanceChangeCapped(rawData []byte, sl types.StakerList) (map[string]int, error) { - // eg. 0100-000011 - // first part 0100 tells that the effective-balance of staker corresponding to index 2 in StakerList - // the left part 000011. we use the first 4 bits to tell the length of this number, and it shows as 1 here, the 5th bit is used to tell symbol of the number, 1 means negative, then we can get the abs number indicate by the length. It's -1 here, means effective-balane is 32-1 on beacon chain for now - // the first 32 bytes are information to indicates effective-balance of which staker has changed, 1 means changed, 0 means not. 
32 bytes can represents changes for at most 256 stakers - indexes := rawData[:32] - // bytes after first 32 are details of effective-balance change for each staker which has been marked with 1 in the first 32 bytes, for those who are marked with 0 will just be ignored - // For each staker we support at most 256 validators to join, so the biggest effective-balance change we would have is 256*32, then we need 13 bits to represents the number for each staker. And for compression we use 4 bits to tell the length of bits without leading 0 this number has. - // Then with the symbol we need at most 18 bits for each staker's effective-balance change: 0000.0.0000-0000-0000 (the leading 0 will be ignored for the last 13 bits) - changes := rawData[32:] - index := -1 - byteIndex := 0 - bitOffset := 0 - lengthBits := 5 - stakerChanges := make(map[string]int) - for _, b := range indexes { - for i := 7; i >= 0; i-- { - index++ - if (b>>i)&1 == 1 { - lenValue := changes[byteIndex] << bitOffset - bitsLeft := 8 - bitOffset - lenValue >>= (8 - lengthBits) - if bitsLeft < lengthBits { - byteIndex++ - lenValue |= changes[byteIndex] >> (8 - lengthBits + bitsLeft) - bitOffset = lengthBits - bitsLeft - } else { - if bitOffset += lengthBits; bitOffset == 8 { - bitOffset = 0 - } - if bitsLeft == lengthBits { - byteIndex++ - } - } - - symbol := lenValue & 1 - lenValue >>= 1 - if lenValue <= 0 { - // the range of length we accept is 1-15(the max we will use is actually 13) - return stakerChanges, errors.New("length of change value must be at least 1 bit") - } - - bitsExtracted := 0 - stakerChange := 0 - for bitsExtracted < int(lenValue) { - bitsLeft := 8 - bitOffset - byteValue := changes[byteIndex] << bitOffset - if (int(lenValue) - bitsExtracted) < bitsLeft { - bitsLeft = int(lenValue) - bitsExtracted - bitOffset += bitsLeft - } else { - byteIndex++ - bitOffset = 0 - } - byteValue >>= (8 - bitsLeft) - stakerChange = (stakerChange << bitsLeft) | int(byteValue) - bitsExtracted += bitsLeft - } - stakerChange++ - if symbol == 1 { - stakerChange *= -1 - } - stakerChanges[sl.StakerAddrs[index]] = stakerChange - } - } - } - return stakerChanges, nil -} - // TODO use []byte and assetstypes.GetStakerIDAndAssetID for stakerAddr representation func getStakerID(stakerAddr string, chainID uint64) string { return strings.Join([]string{strings.ToLower(stakerAddr), hexutil.EncodeUint64(chainID)}, utils.DelimiterForID) @@ -436,3 +417,15 @@ func IsLimitedChangeNST(assetID string) bool { func maxEffectiveBalance(assetID string) int { return maxEffectiveBalances[NSTAssetID(assetID)] } + +func getNSTVersionFromDetID(detID string) (int64, error) { + parsedDetID := strings.Split(detID, "_") + if len(parsedDetID) != 2 { + return 0, fmt.Errorf("invalid detID for nst, should be in format of detID_version, got:%s", detID) + } + nstVersion, err := strconv.ParseInt(parsedDetID[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("failed to parse version from:%s, error:%w", parsedDetID[1], err) + } + return nstVersion, nil +} diff --git a/x/oracle/keeper/native_token_parser.go b/x/oracle/keeper/native_token_parser.go new file mode 100644 index 000000000..96f3d9404 --- /dev/null +++ b/x/oracle/keeper/native_token_parser.go @@ -0,0 +1,79 @@ +package keeper + +import ( + "errors" + + "github.com/ExocoreNetwork/exocore/x/oracle/types" +) + +// TODO: This conversion has limited length for balance change, it suites for beaconchain currently, If we extend to other changes, this method need to be upgrade +// for value that might be too big leading too 
long length of the change value, many related changes need to be done since the message size might be too big then +// parseBalanceChange parses rawData to details of amount change for all stakers relative to native restaking +func parseBalanceChangeCapped(rawData []byte, sl types.StakerList) (map[string]int, error) { + // eg. 0100-000011 + // first part 0100 tells that the effective-balance of staker corresponding to index 2 in StakerList + // the left part 000011. we use the first 4 bits to tell the length of this number, and it shows as 1 here, the 5th bit is used to tell symbol of the number, 1 means negative, then we can get the abs number indicate by the length. It's -1 here, means effective-balane is 32-1 on beacon chain for now + // the first 32 bytes are information to indicates effective-balance of which staker has changed, 1 means changed, 0 means not. 32 bytes can represents changes for at most 256 stakers + indexes := rawData[:32] + // bytes after first 32 are details of effective-balance change for each staker which has been marked with 1 in the first 32 bytes, for those who are marked with 0 will just be ignored + // For each staker we support at most 256 validators to join, so the biggest effective-balance change we would have is 256*32, then we need 13 bits to represents the number for each staker. And for compression we use 4 bits to tell the length of bits without leading 0 this number has. + // Then with the symbol we need at most 18 bits for each staker's effective-balance change: 0000.0.0000-0000-0000 (the leading 0 will be ignored for the last 13 bits) + changes := rawData[32:] + index := -1 + byteIndex := 0 + bitOffset := 0 + lengthBits := 5 + stakerChanges := make(map[string]int) + for _, b := range indexes { + for i := 7; i >= 0; i-- { + index++ + if (b>>i)&1 == 1 { + lenValue := changes[byteIndex] << bitOffset + bitsLeft := 8 - bitOffset + lenValue >>= (8 - lengthBits) + if bitsLeft < lengthBits { + byteIndex++ + lenValue |= changes[byteIndex] >> (8 - lengthBits + bitsLeft) + bitOffset = lengthBits - bitsLeft + } else { + if bitOffset += lengthBits; bitOffset == 8 { + bitOffset = 0 + } + if bitsLeft == lengthBits { + byteIndex++ + } + } + + symbol := lenValue & 1 + lenValue >>= 1 + if lenValue <= 0 { + // the range of length we accept is 1-15(the max we will use is actually 13) + return stakerChanges, errors.New("length of change value must be at least 1 bit") + } + + bitsExtracted := 0 + stakerChange := 0 + for bitsExtracted < int(lenValue) { + bitsLeft := 8 - bitOffset + byteValue := changes[byteIndex] << bitOffset + if (int(lenValue) - bitsExtracted) < bitsLeft { + bitsLeft = int(lenValue) - bitsExtracted + bitOffset += bitsLeft + } else { + byteIndex++ + bitOffset = 0 + } + byteValue >>= (8 - bitsLeft) + stakerChange = (stakerChange << bitsLeft) | int(byteValue) + bitsExtracted += bitsLeft + } + stakerChange++ + if symbol == 1 { + stakerChange *= -1 + } + stakerChanges[sl.StakerAddrs[index]] = stakerChange + } + } + } + return stakerChanges, nil +} diff --git a/x/oracle/keeper/native_token_test.go b/x/oracle/keeper/native_token_test.go index ec02bb94c..cb3492eee 100644 --- a/x/oracle/keeper/native_token_test.go +++ b/x/oracle/keeper/native_token_test.go @@ -78,11 +78,13 @@ func (ks *KeeperSuite) TestNSTLifeCycleOneStaker() { {0, -10}, } rawData := convertBalanceChangeToBytes(stakerChanges) - ks.App.OracleKeeper.UpdateNSTByBalanceChange(ks.Ctx, assetID, rawData, 9) + // ks.App.OracleKeeper.UpdateNSTByBalanceChange(ks.Ctx, assetID, rawData, 9) + 
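Because parseBalanceChangeCapped is the only place this wire format is pinned down, a hand-worked payload makes the layout concrete. This is an illustrative snippet, not code taken from the diff, and it would have to live inside the keeper package itself since the function is unexported; the record encoding follows directly from the decoder above: 4 bits of length, 1 sign bit, then length bits holding |change|-1.

// One staker (index 0 in the StakerList) whose effective balance dropped by 1.
// bytes[0:32]: bitmap of changed stakers, most significant bit first; only the first bit is set.
// bytes[32:]:  the record for that staker: length 1 (0001), sign 1, value 0 -> bits 000110 -> 0x18.
rawData := make([]byte, 33)
rawData[0] = 0x80
rawData[32] = 0x18
changes, err := parseBalanceChangeCapped(rawData, types.StakerList{StakerAddrs: []string{"staker0"}})
// expected: err == nil and changes["staker0"] == -1
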
ks.App.OracleKeeper.UpdateNSTByBalanceChange(ks.Ctx, assetID, types.PriceTimeRound{Price: string(rawData), RoundID: 9}, 1) // - 2.1 check stakerInfo stakerInfo = ks.App.OracleKeeper.GetStakerInfo(ks.Ctx, assetID, stakerStr) ks.Equal(types.BalanceInfo{ Block: 1, + Index: 1, RoundID: 9, Change: types.Action_ACTION_SLASH_REFUND, // this is expected to be 32-10=22, not 100-10 @@ -121,7 +123,7 @@ func (ks *KeeperSuite) TestNSTLifeCycleOneStaker() { ks.Equal(types.BalanceInfo{ Block: 1, RoundID: 9, - Index: 1, + Index: 2, Change: types.Action_ACTION_DEPOSIT, Balance: 54, }, *stakerInfo.BalanceList[2]) @@ -134,14 +136,14 @@ func (ks *KeeperSuite) TestNSTLifeCycleOneStaker() { {0, -5}, } rawData = convertBalanceChangeToBytes(stakerChanges) - ks.App.OracleKeeper.UpdateNSTByBalanceChange(ks.Ctx, assetID, rawData, 11) + ks.App.OracleKeeper.UpdateNSTByBalanceChange(ks.Ctx, assetID, types.PriceTimeRound{Price: string(rawData), RoundID: 11}, 2) // - 4.1 check stakerInfo stakerInfo = ks.App.OracleKeeper.GetStakerInfo(ks.Ctx, assetID, stakerStr) ks.Equal(types.BalanceInfo{ Balance: 59, Block: 1, RoundID: 11, - Index: 0, + Index: 3, Change: types.Action_ACTION_SLASH_REFUND, }, *stakerInfo.BalanceList[3]) // check stakerAssetInfo is updated correctly in assets module, this should be triggered in assets module by oracle module's UpdateNSTByBalanceChange @@ -162,7 +164,7 @@ func (ks *KeeperSuite) TestNSTLifeCycleOneStaker() { Balance: 29, Block: 1, RoundID: 11, - Index: 1, + Index: 4, Change: types.Action_ACTION_WITHDRAW, }, *stakerInfo.BalanceList[4]) // withdraw will remove this validator diff --git a/x/oracle/keeper/nonce.go b/x/oracle/keeper/nonce.go index 447d8133b..505907e98 100644 --- a/x/oracle/keeper/nonce.go +++ b/x/oracle/keeper/nonce.go @@ -3,7 +3,6 @@ package keeper import ( "fmt" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" "github.com/ExocoreNetwork/exocore/x/oracle/types" "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" @@ -21,97 +20,17 @@ func (k Keeper) SetNonce(ctx sdk.Context, nonce types.ValidatorNonce) { k.setNonce(store, nonce) } -// AddNonceItem add a nonce item for a specific validator -func (k Keeper) AddNonceItem(ctx sdk.Context, nonce types.ValidatorNonce) { - store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) - if n, found := k.getNonce(store, nonce.Validator); found { - feederIDs := make(map[uint64]struct{}) - for _, v := range n.NonceList { - feederIDs[v.FeederID] = struct{}{} - } - for _, v := range nonce.NonceList { - if _, ok := feederIDs[v.FeederID]; ok { - continue - } - n.NonceList = append(n.NonceList, v) - } - k.setNonce(store, n) - } else { - k.setNonce(store, nonce) - } -} - -// AddZeroNonceItemForValidators init the nonce of a specific feederID for a set of validators -func (k Keeper) AddZeroNonceItemWithFeederIDForValidators(ctx sdk.Context, feederID uint64, validators []string) { - store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) - for _, validator := range validators { - if n, found := k.getNonce(store, validator); found { - found := false - for _, v := range n.NonceList { - if v.FeederID == feederID { - found = true - break - } - } - if !found { - n.NonceList = append(n.NonceList, &types.Nonce{FeederID: feederID, Value: 0}) - k.setNonce(store, n) - } - } else { - k.setNonce(store, types.ValidatorNonce{Validator: validator, NonceList: []*types.Nonce{{FeederID: feederID, Value: 0}}}) - } - } -} - // RemoveNonceWithValidator remove the 
nonce for a specific validator func (k Keeper) RemoveNonceWithValidator(ctx sdk.Context, validator string) { store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) k.removeNonceWithValidator(store, validator) } -// RemoveNonceWithValidatorAndFeederID remove the nonce for a specific validator and feederID -func (k Keeper) RemoveNonceWithValidatorAndFeederID(ctx sdk.Context, validator string, feederID uint64) bool { - store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) - if nonce, found := k.GetNonce(ctx, validator); found { - for i, n := range nonce.NonceList { - if n.FeederID == feederID { - nonce.NonceList = append(nonce.NonceList[:i], nonce.NonceList[i+1:]...) - if len(nonce.NonceList) == 0 { - k.removeNonceWithValidator(store, validator) - } else { - k.setNonce(store, nonce) - } - return true - } - } - } - return false -} - -// RemoveNonceWithFeederIDForValidators remove the nonce for a specific feederID from a set of validators -func (k Keeper) RemoveNonceWithFeederIDForValidators(ctx sdk.Context, feederID uint64, validators []string) { - store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) - k.removeNonceWithFeederIDForValidators(store, feederID, validators) -} - -// RemoveNonceWithFeederIDForAll remove the nonce for a specific feederID from all validators -func (k Keeper) RemoveNonceWithFeederIDForAll(ctx sdk.Context, feederID uint64) { - store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) - iterator := store.Iterator(nil, nil) - defer iterator.Close() - var validators []string - for ; iterator.Valid(); iterator.Next() { - var nonce types.ValidatorNonce - k.cdc.MustUnmarshal(iterator.Value(), &nonce) - validators = append(validators, nonce.Validator) - } - k.removeNonceWithFeederIDForValidators(store, feederID, validators) -} - -// CheckAndIncreaseNonce check and increase the nonce for a specific validator and feederID func (k Keeper) CheckAndIncreaseNonce(ctx sdk.Context, validator string, feederID uint64, nonce uint32) (prevNonce uint32, err error) { - if nonce > uint32(common.MaxNonce) { - return 0, fmt.Errorf("nonce_check_failed: max_exceeded: limit=%d received=%d", common.MaxNonce, nonce) + maxNonce := k.GetMaxNonceFromCache() + // #nosec G115 // safe conversion + if nonce > uint32(maxNonce) { + return 0, fmt.Errorf("nonce_check_failed: max_exceeded: limit=%d received=%d", maxNonce, nonce) } store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) if n, found := k.getNonce(store, validator); found { @@ -131,7 +50,6 @@ func (k Keeper) CheckAndIncreaseNonce(ctx sdk.Context, validator string, feederI } // internal usage for avoiding duplicated 'NewStore' - func (k Keeper) getNonce(store prefix.Store, validator string) (types.ValidatorNonce, bool) { bz := store.Get(types.NonceKey(validator)) if bz != nil { @@ -151,26 +69,82 @@ func (k Keeper) removeNonceWithValidator(store prefix.Store, validator string) { store.Delete(types.NonceKey(validator)) } -func (k Keeper) removeNonceWithValidatorAndFeederID(store prefix.Store, validator string, feederID uint64) bool { - if nonce, found := k.getNonce(store, validator); found { - // TODO: performance. key-value: validator_feederID:nonce - for i, n := range nonce.NonceList { - if n.FeederID == feederID { - nonce.NonceList = append(nonce.NonceList[:i], nonce.NonceList[i+1:]...) 
- if len(nonce.NonceList) == 0 { - k.removeNonceWithValidator(store, validator) - } else { - k.setNonce(store, nonce) +// AddZeroNonceItemWithFeederIDsForValidators init the nonce of a batch of feederIDs for a set of validators +// feederIDs must be ordered +func (k Keeper) AddZeroNonceItemWithFeederIDsForValidators(ctx sdk.Context, feederIDs []uint64, validators []string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) + for _, validator := range validators { + if n, found := k.getNonce(store, validator); found { + fIDs := make(map[uint64]struct{}) + for _, v := range n.NonceList { + fIDs[v.FeederID] = struct{}{} + } + updated := false + // added feederIDs are kept ordered + for _, feederID := range feederIDs { + if _, ok := fIDs[feederID]; !ok { + n.NonceList = append(n.NonceList, &types.Nonce{FeederID: feederID, Value: 0}) + fIDs[feederID] = struct{}{} + updated = true } - return true } + if updated { + k.setNonce(store, n) + } + } else { + n := types.ValidatorNonce{Validator: validator, NonceList: make([]*types.Nonce, 0, len(feederIDs))} + // ordered feederIDs + for _, feederID := range feederIDs { + n.NonceList = append(n.NonceList, &types.Nonce{FeederID: feederID, Value: 0}) + } + k.setNonce(store, n) } } - return false } -func (k Keeper) removeNonceWithFeederIDForValidators(store prefix.Store, feederID uint64, validators []string) { +// RemoveNonceWithFeederIDsForValidators remove the nonce for a batch of feederIDs from a set of validators +func (k Keeper) RemoveNonceWithFeederIDsForValidators(ctx sdk.Context, feederIDs []uint64, validators []string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) + k.removeNonceWithFeederIDsForValidators(store, feederIDs, validators) +} + +func (k Keeper) RemoveNonceWithFeederIDsForAll(ctx sdk.Context, feederIDs []uint64) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.NonceKeyPrefix)) + iterator := store.Iterator(nil, nil) + defer iterator.Close() + var validators []string + for ; iterator.Valid(); iterator.Next() { + var nonce types.ValidatorNonce + k.cdc.MustUnmarshal(iterator.Value(), &nonce) + validators = append(validators, nonce.Validator) + } + + k.removeNonceWithFeederIDsForValidators(store, feederIDs, validators) +} + +func (k Keeper) removeNonceWithFeederIDsForValidators(store prefix.Store, feederIDs []uint64, validators []string) { + fIDs := make(map[uint64]struct{}) + for _, feederID := range feederIDs { + fIDs[feederID] = struct{}{} + } for _, validator := range validators { - k.removeNonceWithValidatorAndFeederID(store, validator, feederID) + if nonce, found := k.getNonce(store, validator); found { + l := len(nonce.NonceList) + // the order in nonceList is kept after removed + for i := 0; i < l; i++ { + n := nonce.NonceList[i] + if _, ok := fIDs[n.FeederID]; ok { + nonce.NonceList = append(nonce.NonceList[:i], nonce.NonceList[i+1:]...) 
+ } + i-- + l-- + } + + if len(nonce.NonceList) == 0 { + k.removeNonceWithValidator(store, validator) + } else { + k.setNonce(store, nonce) + } + } } } diff --git a/x/oracle/keeper/params.go b/x/oracle/keeper/params.go index 9f7ccff5a..6a189b9db 100644 --- a/x/oracle/keeper/params.go +++ b/x/oracle/keeper/params.go @@ -5,7 +5,6 @@ import ( "strconv" "strings" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" "github.com/ExocoreNetwork/exocore/x/oracle/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -40,6 +39,7 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. chainID := uint64(0) for id, c := range p.Chains { if c.Name == oInfo.Chain.Name { + // #nosec G115 chainID = uint64(id) break } @@ -50,6 +50,7 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. Name: oInfo.Chain.Name, Desc: oInfo.Chain.Desc, }) + // #nosec G115 chainID = uint64(len(p.Chains) - 1) } decimalInt, err := strconv.ParseInt(oInfo.Token.Decimal, 10, 32) @@ -67,16 +68,18 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. intervalInt = defaultInterval } + defer func() { + if !ctx.IsCheckTx() { + k.SetParamsUpdated() + } + }() + for _, t := range p.Tokens { // token exists, bind assetID for this token // it's possible for one price bonded with multiple assetID, like ETHUSDT from sepolia/mainnet if t.Name == oInfo.Token.Name && t.ChainID == chainID { t.AssetID = strings.Join([]string{t.AssetID, oInfo.AssetID}, ",") k.SetParams(ctx, p) - if !ctx.IsCheckTx() { - _ = k.GetAggregatorContext(ctx) - k.GetCaches().AddCache(cache.ItemP(p)) - } // there should have been existing tokenFeeder running(currently we register tokens from assets-module and with infinite endBlock) return nil } @@ -94,10 +97,12 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. // set a tokenFeeder for the new token p.TokenFeeders = append(p.TokenFeeders, &types.TokenFeeder{ + // #nosec G115 // len(p.Tokens) must be positive since we just append an element for it TokenID: uint64(len(p.Tokens) - 1), // we only support rule_1 for v1 - RuleID: 1, - StartRoundID: 1, + RuleID: 1, + StartRoundID: 1, + // #nosec G115 StartBaseBlock: uint64(ctx.BlockHeight() + startAfterBlocks), Interval: intervalInt, // we don't end feeders for v1 @@ -105,11 +110,5 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. }) k.SetParams(ctx, p) - // skip cache update if this is not deliverTx - // for normal cosmostx, checkTx will skip actual message exucution and do anteHandler only, but from ethc.callContract the message will be executed without anteHandler check as checkTx mode. 
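The nonce helpers above now operate on batches of feeder IDs rather than one at a time. Their call sites are not part of this hunk, so the wiring below is a hypothetical sketch of the intended pairing, not code from the diff:

// When price windows open for a batch of token feeders, give every bonded validator a zero
// nonce for them (the slice must be ordered, per the comment on the helper), and drop those
// entries again once the corresponding rounds are sealed.
openingFeederIDs := []uint64{2, 5}
k.AddZeroNonceItemWithFeederIDsForValidators(ctx, openingFeederIDs, validators)
// ... rounds run, and submitted quotes are checked against the per-feeder nonces via CheckAndIncreaseNonce ...
k.RemoveNonceWithFeederIDsForValidators(ctx, openingFeederIDs, validators)
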
- if !ctx.IsCheckTx() { - _ = k.GetAggregatorContext(ctx) - k.GetCaches().AddCache(cache.ItemP(p)) - } return nil } diff --git a/x/oracle/keeper/params_test.go b/x/oracle/keeper/params_test.go index 21980c3f2..ca6bb3e4e 100644 --- a/x/oracle/keeper/params_test.go +++ b/x/oracle/keeper/params_test.go @@ -4,13 +4,14 @@ import ( "testing" testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/testdata" "github.com/ExocoreNetwork/exocore/x/oracle/types" "github.com/stretchr/testify/require" ) func TestGetParams(t *testing.T) { k, ctx := testkeeper.OracleKeeper(t) - params := types.DefaultParams() + params := testdata.DefaultParamsForTest() k.SetParams(ctx, params) @@ -176,7 +177,7 @@ func TestUpdateTokenFeederUpdate(t *testing.T) { err: nil, }, } - p := DefaultParamsForTest() + p := testdata.DefaultParamsForTest() p.Tokens = append(p.Tokens, &types.Token{ Name: "TEST", ChainID: 1, @@ -217,14 +218,14 @@ func TestUpdateTokenFeederUpdate(t *testing.T) { } func TestParamsValidate(t *testing.T) { - p := types.DefaultParams() + p := testdata.DefaultParamsForTest() p.MaxSizePrices = 0 err := p.Validate() require.ErrorIs(t, err, types.ErrInvalidParams.Wrap("invalid MaxSizePrices")) } func TestTokenFeederValidate(t *testing.T) { - p := types.DefaultParams() + p := testdata.DefaultParamsForTest() cases := []struct { name string prevEndBlock uint64 @@ -269,7 +270,7 @@ func TestTokenFeederValidate(t *testing.T) { }, { name: "valid case with two feeders", - prevEndBlock: 1000015, + prevEndBlock: 35, feeder: &types.TokenFeeder{ TokenID: 1, RuleID: 1, @@ -282,7 +283,7 @@ func TestTokenFeederValidate(t *testing.T) { } for _, testCase := range cases { t.Run(testCase.name, func(t *testing.T) { - p = DefaultParamsForTest() + p = testdata.DefaultParamsForTest() p.TokenFeeders[1].EndBlock = testCase.prevEndBlock p.TokenFeeders = append(p.TokenFeeders, testCase.feeder) err := p.Validate() diff --git a/x/oracle/keeper/prices.go b/x/oracle/keeper/prices.go index 3a3862c67..721c27e20 100644 --- a/x/oracle/keeper/prices.go +++ b/x/oracle/keeper/prices.go @@ -2,12 +2,9 @@ package keeper import ( "encoding/binary" - "fmt" - "strings" sdkmath "cosmossdk.io/math" assetstypes "github.com/ExocoreNetwork/exocore/x/assets/types" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" "github.com/ExocoreNetwork/exocore/x/oracle/types" "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" @@ -34,12 +31,15 @@ func (k Keeper) GetPrices( val.TokenID = tokenID val.NextRoundID = nextRoundID var i uint64 - if nextRoundID <= uint64(common.MaxSizePrices) { + maxSizePrices := k.FeederManager.GetMaxSizePricesFromCache() + // #nosec G115 + if nextRoundID <= uint64(maxSizePrices) { i = 1 val.PriceList = make([]*types.PriceTimeRound, 0, nextRoundID) } else { - i = nextRoundID - uint64(common.MaxSizePrices) - val.PriceList = make([]*types.PriceTimeRound, 0, common.MaxSizePrices) + // #nosec G11 + i = nextRoundID - uint64(maxSizePrices) + val.PriceList = make([]*types.PriceTimeRound, 0, maxSizePrices) } for ; i < nextRoundID; i++ { b := store.Get(types.PricesRoundKey(i)) @@ -63,17 +63,13 @@ func (k Keeper) GetSpecifiedAssetsPrice(ctx sdk.Context, assetID string) (types. 
}, nil } - var p types.Params // get params from cache if exists - if k.memStore.agc != nil { - p = k.memStore.agc.GetParams() - } else { - p = k.GetParams(ctx) - } + p := k.GetParamsFromCache() tokenID := p.GetTokenIDFromAssetID(assetID) if tokenID == 0 { return types.Price{}, types.ErrGetPriceAssetNotFound.Wrapf("assetID does not exist in oracle %s", assetID) } + // #nosec G115 price, found := k.GetPriceTRLatest(ctx, uint64(tokenID)) if !found { return types.Price{ @@ -97,13 +93,8 @@ func (k Keeper) GetSpecifiedAssetsPrice(ctx sdk.Context, assetID string) (types. // return latest price for assets func (k Keeper) GetMultipleAssetsPrices(ctx sdk.Context, assets map[string]interface{}) (prices map[string]types.Price, err error) { - var p types.Params // get params from cache if exists - if k.memStore.agc != nil { - p = k.memStore.agc.GetParams() - } else { - p = k.GetParams(ctx) - } + p := k.GetParamsFromCache() // ret := make(map[string]types.Price) prices = make(map[string]types.Price) info := "" @@ -122,6 +113,7 @@ func (k Keeper) GetMultipleAssetsPrices(ctx sdk.Context, assets map[string]inter prices = nil break } + // #nosec G115 price, found := k.GetPriceTRLatest(ctx, uint64(tokenID)) if !found { info = info + assetID + " " @@ -200,56 +192,60 @@ func (k Keeper) GetAllPrices(ctx sdk.Context) (list []types.Prices) { } // AppenPriceTR append a new round of price for specific token, return false if the roundID not match -func (k Keeper) AppendPriceTR(ctx sdk.Context, tokenID uint64, priceTR types.PriceTimeRound) bool { +func (k Keeper) AppendPriceTR(ctx sdk.Context, tokenID uint64, priceTR types.PriceTimeRound, detID string) bool { nextRoundID := k.GetNextRoundID(ctx, tokenID) + logger := k.Logger(ctx) // This should not happen if nextRoundID != priceTR.RoundID { + logger.Error("roundID not match", "nextRoundID", nextRoundID, "priceTR.RoundID", priceTR.RoundID) return false } store := k.getPriceTRStore(ctx, tokenID) b := k.cdc.MustMarshal(&priceTR) store.Set(types.PricesRoundKey(nextRoundID), b) - if expiredRoundID := nextRoundID - k.memStore.agc.GetParamsMaxSizePrices(); expiredRoundID > 0 { + + p := *k.GetParamsFromCache() + // #nosec G115 // maxSizePrices is not negative + if expiredRoundID := nextRoundID - uint64(p.MaxSizePrices); expiredRoundID > 0 { store.Delete(types.PricesRoundKey(expiredRoundID)) } - roundID := k.IncreaseNextRoundID(ctx, tokenID) + k.IncreaseNextRoundID(ctx, tokenID) - // update for native tokens - // TODO: set hooks as a genral approach - var p types.Params - // get params from cache if exists - if k.memStore.agc != nil { - p = k.memStore.agc.GetParams() - } else { - p = k.GetParams(ctx) + if len(priceTR.Price) == 0 { + return true } - assetIDs := p.GetAssetIDsFromTokenID(tokenID) - for _, assetID := range assetIDs { - if nstChain, ok := strings.CutPrefix(assetID, types.NSTIDPrefix); ok { - if err := k.UpdateNSTByBalanceChange(ctx, fmt.Sprintf("%s%s", NSTETHAssetAddr, nstChain), []byte(priceTR.Price), roundID); err != nil { - // we just report this error in log to notify validators - k.Logger(ctx).Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err) - } + if nstAssetID := p.GetAssetIDForNSTFromTokenID(tokenID); len(nstAssetID) > 0 { + nstVersion, err := getNSTVersionFromDetID(detID) + if err != nil || nstVersion == 0 { + logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err, "nstVersion", nstVersion, "tokenID", tokenID, "roundID", nextRoundID) + return true + } + err = k.UpdateNSTByBalanceChange(ctx, nstAssetID, 
priceTR, nstVersion) + if err != nil { + // we just report this error in log to notify validators + logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err) + } else { + logger.Info("updated balance change for NST") } } - return true } // GrowRoundID Increases roundID with the previous price +// func (k Keeper) GrowRoundID(ctx sdk.Context, tokenID uint64) (price *types.PriceTimeRound, roundID uint64) { func (k Keeper) GrowRoundID(ctx sdk.Context, tokenID uint64) (price string, roundID uint64) { if pTR, ok := k.GetPriceTRLatest(ctx, tokenID); ok { pTR.RoundID++ - k.AppendPriceTR(ctx, tokenID, pTR) + k.AppendPriceTR(ctx, tokenID, pTR, "") price = pTR.Price roundID = pTR.RoundID } else { nextRoundID := k.GetNextRoundID(ctx, tokenID) k.AppendPriceTR(ctx, tokenID, types.PriceTimeRound{ RoundID: nextRoundID, - }) - price = "" + }, "") roundID = nextRoundID + price = "" } return } diff --git a/x/oracle/keeper/prices_test.go b/x/oracle/keeper/prices_test.go index cad34ee6b..4d84f4e7a 100644 --- a/x/oracle/keeper/prices_test.go +++ b/x/oracle/keeper/prices_test.go @@ -48,6 +48,8 @@ func TestPricesGet(t *testing.T) { func TestPricesGetMultiAssets(t *testing.T) { keeper, ctx := keepertest.OracleKeeper(t) + keeper.FeederManager.SetNilCaches() + keeper.FeederManager.BeginBlock(ctx) keeper.SetPrices(ctx, testdata.P1) assets := make(map[string]interface{}) assets["0x0b34c4d876cd569129cf56bafabb3f9e97a4ff42_0x9ce1"] = new(interface{}) diff --git a/x/oracle/keeper/query_native_token.go b/x/oracle/keeper/query_native_token.go index 3b5d749cd..09910131d 100644 --- a/x/oracle/keeper/query_native_token.go +++ b/x/oracle/keeper/query_native_token.go @@ -24,7 +24,14 @@ func (k Keeper) StakerInfos(goCtx context.Context, req *types.QueryStakerInfosRe return nil, ErrUnsupportedAsset } ctx := sdk.UnwrapSDKContext(goCtx) - return k.GetStakerInfos(ctx, req) + + stakerInfosResp, err := k.GetStakerInfos(ctx, req) + if err != nil { + return stakerInfosResp, err + } + version := k.GetNSTVersion(ctx, req.AssetId) + stakerInfosResp.Version = version + return stakerInfosResp, nil } func (k Keeper) StakerInfo(goCtx context.Context, req *types.QueryStakerInfoRequest) (*types.QueryStakerInfoResponse, error) { @@ -36,7 +43,8 @@ func (k Keeper) StakerInfo(goCtx context.Context, req *types.QueryStakerInfoRequ } ctx := sdk.UnwrapSDKContext(goCtx) stakerInfo := k.GetStakerInfo(ctx, req.AssetId, req.StakerAddr) - return &types.QueryStakerInfoResponse{StakerInfo: &stakerInfo}, nil + version := k.GetNSTVersion(ctx, req.AssetId) + return &types.QueryStakerInfoResponse{Version: version, StakerInfo: &stakerInfo}, nil } func (k Keeper) StakerList(goCtx context.Context, req *types.QueryStakerListRequest) (*types.QueryStakerListResponse, error) { @@ -48,5 +56,6 @@ func (k Keeper) StakerList(goCtx context.Context, req *types.QueryStakerListRequ } ctx := sdk.UnwrapSDKContext(goCtx) stakerList := k.GetStakerList(ctx, req.AssetId) - return &types.QueryStakerListResponse{StakerList: &stakerList}, nil + version := k.GetNSTVersion(ctx, req.AssetId) + return &types.QueryStakerListResponse{Version: version, StakerList: &stakerList}, nil } diff --git a/x/oracle/keeper/recent_msg.go b/x/oracle/keeper/recent_msg.go index bf7f25983..d6094fbe4 100644 --- a/x/oracle/keeper/recent_msg.go +++ b/x/oracle/keeper/recent_msg.go @@ -6,6 +6,29 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) +// SetMsgItemsForCache set a specific recentMsg with its height as index in the store +func (k Keeper) SetMsgItemsForCache(ctx sdk.Context, 
recentMsg types.RecentMsg) { + index, found := k.GetIndexRecentMsg(ctx) + block := uint64(ctx.BlockHeight()) + if found { + i := 0 + maxNonce := k.GetParams(ctx).MaxNonce + for ; i < len(index.Index); i++ { + b := index.Index[i] + // #nosec G115 // maxNonce is not negative + if block < uint64(maxNonce) || b > block-uint64(maxNonce) { + break + } + // remove old recentMsg + k.RemoveRecentMsg(ctx, b) + } + index.Index = index.Index[i:] + } + index.Index = append(index.Index, block) + k.SetIndexRecentMsg(ctx, index) + k.SetRecentMsg(ctx, recentMsg) +} + // SetRecentMsg set a specific recentMsg in the store from its index func (k Keeper) SetRecentMsg(ctx sdk.Context, recentMsg types.RecentMsg) { store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.RecentMsgKeyPrefix)) diff --git a/x/oracle/keeper/recent_params.go b/x/oracle/keeper/recent_params.go index efd4af316..e43cf9d27 100644 --- a/x/oracle/keeper/recent_params.go +++ b/x/oracle/keeper/recent_params.go @@ -6,6 +6,38 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) +func (k Keeper) SetParamsForCache(ctx sdk.Context, params types.RecentParams) { + block := uint64(ctx.BlockHeight()) + index, found := k.GetIndexRecentParams(ctx) + if found { + // if the maxNonce is changed in this block, all rounds would be force sealed, so it's ok to use either the old or new maxNonce + maxNonce := k.GetParams(ctx).MaxNonce + l := len(index.Index) + if l > 0 { + // keep at least one history params before appending current new params + prev := index.Index[0] + idx := 0 + // #nosec G115 + if prev <= block-uint64(maxNonce) && l > 1 { + for i := 1; i < l; i++ { + k.RemoveRecentParams(ctx, prev) + b := index.Index[i] + // #nosec G115 + if b > block-uint64(maxNonce) { + break + } + prev = b + idx = i + } + } + index.Index = index.Index[idx:] + } + } + index.Index = append(index.Index, block) + k.SetIndexRecentParams(ctx, index) + k.SetRecentParams(ctx, params) +} + // SetRecentParams set a specific recentParams in the store from its index func (k Keeper) SetRecentParams(ctx sdk.Context, recentParams types.RecentParams) { store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.RecentParamsKeyPrefix)) @@ -77,3 +109,40 @@ func (k Keeper) GetAllRecentParamsAsMap(ctx sdk.Context) (result map[int64]*type return } + +// GetRecentParamsWithinMaxNonce returns all recentParams within the maxNonce and the latest recentParams separately +func (k Keeper) GetRecentParamsWithinMaxNonce(ctx sdk.Context) (recentParamsList []*types.RecentParams, prev, latest types.RecentParams) { + maxNonce := k.GetParams(ctx).MaxNonce + var startHeight uint64 + if uint64(ctx.BlockHeight()) > uint64(maxNonce) { + startHeight = uint64(ctx.BlockHeight()) - uint64(maxNonce) + } + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.RecentParamsKeyPrefix)) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + recentParamsList = make([]*types.RecentParams, 0, maxNonce) + notFound := true + for ; iterator.Valid(); iterator.Next() { + var val types.RecentParams + k.cdc.MustUnmarshal(iterator.Value(), &val) + latest = val + if val.Block >= startHeight { + if notFound { + notFound = false + } + recentParamsList = append(recentParamsList, &val) + } + if notFound { + prev = val + } + + } + if len(recentParamsList) > 0 { + if prev.Block == recentParamsList[0].Block { + prev = types.RecentParams{} + } + } + return recentParamsList, prev, latest +} diff --git a/x/oracle/keeper/single.go b/x/oracle/keeper/single.go 
deleted file mode 100644 index b40668333..000000000 --- a/x/oracle/keeper/single.go +++ /dev/null @@ -1,225 +0,0 @@ -package keeper - -import ( - "math/big" - "strconv" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/aggregator" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -func (k *Keeper) GetCaches() *cache.Cache { - if k.memStore.cs != nil { - return k.memStore.cs - } - k.memStore.cs = cache.NewCache() - return k.memStore.cs -} - -// GetAggregatorContext returns singleton aggregatorContext used to calculate final price for each round of each tokenFeeder -func (k *Keeper) GetAggregatorContext(ctx sdk.Context) *aggregator.AggregatorContext { - if ctx.IsCheckTx() { - if k.memStore.agcCheckTx != nil { - return k.memStore.agcCheckTx - } - if k.memStore.agc == nil { - c := k.GetCaches() - c.ResetCaches() - k.memStore.agcCheckTx = aggregator.NewAggregatorContext() - if ok := k.recacheAggregatorContext(ctx, k.memStore.agcCheckTx, c); !ok { - // this is the very first time oracle has been started, fill relalted info as initialization - initAggregatorContext(ctx, k.memStore.agcCheckTx, k, c) - } - return k.memStore.agcCheckTx - } - k.memStore.agcCheckTx = k.memStore.agc.Copy4CheckTx() - return k.memStore.agcCheckTx - } - - if k.memStore.agc != nil { - return k.memStore.agc - } - - c := k.GetCaches() - c.ResetCaches() - k.memStore.agc = aggregator.NewAggregatorContext() - if ok := k.recacheAggregatorContext(ctx, k.memStore.agc, c); !ok { - // this is the very first time oracle has been started, fill relalted info as initialization - initAggregatorContext(ctx, k.memStore.agc, k, c) - } else { - // this is when a node restart and use the persistent state to refill cache, we don't need to commit these data again - c.SkipCommit() - } - return k.memStore.agc -} - -func (k Keeper) recacheAggregatorContext(ctx sdk.Context, agc *aggregator.AggregatorContext, c *cache.Cache) bool { - logger := k.Logger(ctx) - oracleParams := k.GetParams(ctx) - from := ctx.BlockHeight() - int64(oracleParams.MaxNonce) + 1 - to := ctx.BlockHeight() - - h, ok := k.GetValidatorUpdateBlock(ctx) - recentParamsMap := k.GetAllRecentParamsAsMap(ctx) - if !ok || len(recentParamsMap) == 0 { - logger.Info("recacheAggregatorContext: no validatorUpdateBlock found, go to initial process", "height", ctx.BlockHeight()) - // no cache, this is the very first running, so go to initial process instead - return false - } - - forceSealHeight := h.Block - // #nosec G115 - if int64(forceSealHeight) >= from { - from = int64(h.Block) + 1 - logger.Info("recacheAggregatorContext: with validatorSet updated recently", "latestValidatorUpdateBlock", h.Block, "currentHeight", ctx.BlockHeight()) - } - - logger.Info("recacheAggregatorContext", "from", from, "to", to, "height", ctx.BlockHeight()) - totalPower := big.NewInt(0) - validatorPowers := make(map[string]*big.Int) - validatorSet := k.GetAllExocoreValidators(ctx) - for _, v := range validatorSet { - validatorPowers[sdk.ConsAddress(v.Address).String()] = big.NewInt(v.Power) - totalPower = new(big.Int).Add(totalPower, big.NewInt(v.Power)) - } - agc.SetValidatorPowers(validatorPowers) - - // reset validators - c.AddCache(cache.ItemV(validatorPowers)) - - recentMsgs := k.GetAllRecentMsgAsMap(ctx) - var p *types.Params - var b int64 - if from >= to { - // backwards compatible for that the validatorUpdateBlock updated every 
block - prev := int64(0) - for b = range recentParamsMap { - if b > prev { - prev = b - } - } - p = recentParamsMap[prev] - agc.SetParams(p) - setCommonParams(p) - } else { - prev := int64(0) - for ; from < to; from++ { - // fill params - for b, p = range recentParamsMap { - // find the params which is the latest one before the replayed block height since prepareRoundEndBlock will use it and it should be the latest one before current block - if b < from && b > prev { - agc.SetParams(p) - prev = b - setCommonParams(p) - delete(recentParamsMap, b) - } - } - - logger.Info("recacheAggregatorContext: prepareRoundEndBlock", "baseBlock", from-1, "forceSealHeight", forceSealHeight) - agc.PrepareRoundEndBlock(ctx, from-1, forceSealHeight) - - if msgs := recentMsgs[from]; msgs != nil { - for _, msg := range msgs { - // these messages are retreived for recache, just skip the validation check and fill the memory cache - //nolint - agc.FillPrice(&types.MsgCreatePrice{ - Creator: msg.Validator, - FeederID: msg.FeederID, - Prices: msg.PSources, - }) - } - } - ctxReplay := ctx.WithBlockHeight(from) - logger.Info("recacheAggregatorContext: sealRound", "blockEnd", from) - agc.SealRound(ctxReplay, false) - } - - for b, p = range recentParamsMap { - // use the latest params before the current block height - if b < to && b > prev { - agc.SetParams(p) - prev = b - setCommonParams(p) - } - } - } - logger.Info("recacheAggregatorContext: PrepareRoundEndBlock", "baseBlock", to-1) - agc.PrepareRoundEndBlock(ctx, to-1, forceSealHeight) - - var pRet cache.ItemP - if updated := c.GetCache(&pRet); !updated { - c.AddCache(cache.ItemP(*p)) - } - // TODO: these 4 lines are mainly used for hot fix - // since the latest params stored in KV for recache should be the same with the latest params, so these lines are just duplicated actions if everything is fine. 
- *p = k.GetParams(ctx) - agc.SetParams(p) - setCommonParams(p) - c.AddCache(cache.ItemP(*p)) - - return true -} - -func initAggregatorContext(ctx sdk.Context, agc *aggregator.AggregatorContext, k *Keeper, c *cache.Cache) { - ctx.Logger().Info("initAggregatorContext", "height", ctx.BlockHeight()) - // set params - p := k.GetParams(ctx) - agc.SetParams(&p) - // set params cache - c.AddCache(cache.ItemP(p)) - setCommonParams(&p) - - totalPower := big.NewInt(0) - validatorPowers := make(map[string]*big.Int) - validatorSet := k.GetAllExocoreValidators(ctx) - for _, v := range validatorSet { - validatorPowers[sdk.ConsAddress(v.Address).String()] = big.NewInt(v.Power) - totalPower = new(big.Int).Add(totalPower, big.NewInt(v.Power)) - } - - agc.SetValidatorPowers(validatorPowers) - // set validatorPower cache - c.AddCache(cache.ItemV(validatorPowers)) - - agc.PrepareRoundEndBlock(ctx, ctx.BlockHeight()-1, 0) -} - -func (k *Keeper) ResetAggregatorContext() { - k.memStore.agc = nil -} - -func (k *Keeper) ResetCache() { - k.memStore.cs = nil -} - -func (k *Keeper) ResetAggregatorContextCheckTx() { - k.memStore.agcCheckTx = nil -} - -// setCommonParams save static fields in params in memory cache since these fields will not change during node running -// TODO: further when params is abled to be updated through tx/gov, this cache should be taken care if any is available to be changed -func setCommonParams(p *types.Params) { - common.MaxNonce = p.MaxNonce - common.ThresholdA = p.ThresholdA - common.ThresholdB = p.ThresholdB - common.MaxDetID = p.MaxDetId - common.Mode = p.Mode - common.MaxSizePrices = int(p.MaxSizePrices) -} - -func (k *Keeper) ResetUpdatedFeederIDs() { - if k.memStore.updatedFeederIDs != nil { - k.memStore.updatedFeederIDs = nil - } -} - -func (k Keeper) GetUpdatedFeederIDs() []string { - return k.memStore.updatedFeederIDs -} - -func (k *Keeper) AppendUpdatedFeederIDs(id uint64) { - k.memStore.updatedFeederIDs = append(k.memStore.updatedFeederIDs, strconv.FormatUint(id, 10)) -} diff --git a/x/oracle/keeper/slashing.go b/x/oracle/keeper/slashing.go index b05c761e8..3e7ea7e14 100644 --- a/x/oracle/keeper/slashing.go +++ b/x/oracle/keeper/slashing.go @@ -26,6 +26,15 @@ func (k Keeper) InitValidatorReportInfo(ctx sdk.Context, validator string, heigh } } +func (k Keeper) ClearAllValidatorReportInfo(ctx sdk.Context) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValidatorReportInfoPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + for ; iterator.Valid(); iterator.Next() { + store.Delete(iterator.Key()) + } + iterator.Close() +} + // SetValidatorReportInfo sets the validator reporting info for a validator func (k Keeper) SetValidatorReportInfo(ctx sdk.Context, validator string, info types.ValidatorReportInfo) { store := ctx.KVStore(k.storeKey) @@ -70,11 +79,6 @@ func (k Keeper) GetReportedRoundsWindow(ctx sdk.Context) int64 { return k.GetParams(ctx).Slashing.ReportedRoundsWindow } -// GetSlashFractionMiss fraction of power slashed for missed rounds -func (k Keeper) GetSlashFractionMiss(ctx sdk.Context) (res sdk.Dec) { - return k.GetParams(ctx).Slashing.SlashFractionMiss -} - // GetSlashFractionMalicious fraction returns the fraction of power slashed for malicious behavior func (k Keeper) GetSlashFractionMalicious(ctx sdk.Context) (res sdk.Dec) { return k.GetParams(ctx).Slashing.SlashFractionMalicious @@ -118,7 +122,7 @@ func (k Keeper) IterateValidatorReportInfos(ctx sdk.Context, handler func(addres // IterateValidatorMissedRoundBitArrray iterates all missed 
rounds in one performance window of rounds func (k Keeper) IterateValidatorMissedRoundBitArray(ctx sdk.Context, validator string, handler func(index uint64, missed bool) (stop bool)) { - store := prefix.NewStore(ctx.KVStore(k.storeKey), types.SlashingMissedBitArrayPrefix(validator)) + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.SlashingMissedBitArrayValidatorPrefix(validator)) iterator := sdk.KVStorePrefixIterator(store, []byte{}) defer iterator.Close() for ; iterator.Valid(); iterator.Next() { @@ -145,7 +149,17 @@ func (k Keeper) GetValidatorMissedRounds(ctx sdk.Context, address string) []*typ // ClearValidatorMissedBlockBitArray deletes every instance of ValidatorMissedBlockBitArray in the store func (k Keeper) ClearValidatorMissedRoundBitArray(ctx sdk.Context, validator string) { store := ctx.KVStore(k.storeKey) - iterator := sdk.KVStorePrefixIterator(store, types.SlashingMissedBitArrayPrefix(validator)) + iterator := sdk.KVStorePrefixIterator(store, types.SlashingMissedBitArrayValidatorPrefix(validator)) + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + store.Delete(iterator.Key()) + } +} + +// ClearAllValidatorMissedRoundBitArray clear all instances of ValidatorMissedBlockBitArray in the store +func (k Keeper) ClearAllValidatorMissedRoundBitArray(ctx sdk.Context) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.MissedBitArrayPrefix) defer iterator.Close() for ; iterator.Valid(); iterator.Next() { store.Delete(iterator.Key()) diff --git a/x/oracle/keeper/testdata/types.go b/x/oracle/keeper/testdata/types.go new file mode 100644 index 000000000..fa74b3944 --- /dev/null +++ b/x/oracle/keeper/testdata/types.go @@ -0,0 +1,76 @@ +package testdata + +import ( + "time" + + sdkmath "cosmossdk.io/math" + "github.com/ExocoreNetwork/exocore/x/oracle/types" +) + +func DefaultParamsForTest() types.Params { + return types.Params{ + Chains: []*types.Chain{ + {Name: "-", Desc: "-"}, + {Name: "Ethereum", Desc: "-"}, + }, + Tokens: []*types.Token{ + {}, + { + Name: "ETH", + ChainID: 1, + ContractAddress: "0x", + Decimal: 8, + Active: true, + AssetID: "0x0b34c4d876cd569129cf56bafabb3f9e97a4ff42_0x9ce1", + }, + }, + // source defines where to fetch the prices + Sources: []*types.Source{ + { + Name: "0 position is reserved", + }, + { + Name: "Chainlink", + Entry: &types.Endpoint{ + Offchain: map[uint64]string{0: ""}, + }, + Valid: true, + Deterministic: true, + }, + }, + // rules defines price from which sources are accepted, could be used to proof malicious + Rules: []*types.RuleSource{ + // 0 is reserved + {}, + { + // all sources math + SourceIDs: []uint64{0}, + }, + }, + // TokenFeeder describes when a token start to be updated with its price, and the frequency, endTime. 
+ TokenFeeders: []*types.TokenFeeder{ + {}, + { + TokenID: 1, + RuleID: 1, + StartRoundID: 1, + StartBaseBlock: 20, + Interval: 10, + }, + }, + MaxNonce: 3, + ThresholdA: 2, + ThresholdB: 3, + // V1 set mode to 1 + Mode: types.ConsensusModeASAP, + MaxDetId: 5, + MaxSizePrices: 100, + Slashing: &types.SlashingParams{ + ReportedRoundsWindow: 100, + MinReportedPerWindow: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(2)), + OracleMissJailDuration: 600 * time.Second, + OracleMaliciousJailDuration: 30 * 24 * time.Hour, + SlashFractionMalicious: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(10)), + }, + } +} diff --git a/x/oracle/keeper/tokens.go b/x/oracle/keeper/tokens.go index 6b97e8733..3221af141 100644 --- a/x/oracle/keeper/tokens.go +++ b/x/oracle/keeper/tokens.go @@ -12,6 +12,7 @@ func (k Keeper) GetTokens(ctx sdk.Context) []*types.TokenIndex { for idx, token := range params.Tokens { ret = append(ret, &types.TokenIndex{ Token: token.Name, + // #nosec G115 Index: uint64(idx), }) } diff --git a/x/oracle/keeper/validate_timestamp.go b/x/oracle/keeper/validate_timestamp.go new file mode 100644 index 000000000..4fb0f8ac7 --- /dev/null +++ b/x/oracle/keeper/validate_timestamp.go @@ -0,0 +1,31 @@ +package keeper + +import ( + "context" + "fmt" + "time" + + "github.com/ExocoreNetwork/exocore/x/oracle/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func checkTimestamp(goCtx context.Context, msg *types.MsgCreatePrice) error { + ctx := sdk.UnwrapSDKContext(goCtx) + now := ctx.BlockTime().UTC() + for _, ps := range msg.Prices { + for _, price := range ps.Prices { + ts := price.Timestamp + if len(ts) == 0 { + return fmt.Errorf("timestamp should not be empty, blockTime:%s, got:%s", now.Format(layout), ts) + } + t, err := time.ParseInLocation(layout, ts, time.UTC) + if err != nil { + return fmt.Errorf("timestamp format invalid, blockTime:%s, got:%s", now.Format(layout), ts) + } + if now.Add(maxFutureOffset).Before(t) { + return fmt.Errorf("timestamp is in the future, blockTime:%s, got:%s", now.Format(layout), ts) + } + } + } + return nil +} diff --git a/x/oracle/keeper/validator_update_block.go b/x/oracle/keeper/validator_update_block.go index e5e6ea96d..9b5cfa97d 100644 --- a/x/oracle/keeper/validator_update_block.go +++ b/x/oracle/keeper/validator_update_block.go @@ -6,8 +6,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) -// SetValidatorUpdateBlock set validatorUpdateBlock in the store -func (k Keeper) SetValidatorUpdateBlock(ctx sdk.Context, validatorUpdateBlock types.ValidatorUpdateBlock) { +func (k Keeper) SetValidatorUpdateForCache(ctx sdk.Context, validatorUpdateBlock types.ValidatorUpdateBlock) { store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.ValidatorUpdateBlockKey)) b := k.cdc.MustMarshal(&validatorUpdateBlock) store.Set(types.BlockKey, b) diff --git a/x/oracle/keeper/validator_update_block_test.go b/x/oracle/keeper/validator_update_block_test.go index 56bd66356..b128ed325 100644 --- a/x/oracle/keeper/validator_update_block_test.go +++ b/x/oracle/keeper/validator_update_block_test.go @@ -14,7 +14,7 @@ import ( func createTestValidatorUpdateBlock(keeper *keeper.Keeper, ctx sdk.Context) types.ValidatorUpdateBlock { item := types.ValidatorUpdateBlock{} - keeper.SetValidatorUpdateBlock(ctx, item) + keeper.SetValidatorUpdateForCache(ctx, item) return item } diff --git a/x/oracle/module.go b/x/oracle/module.go index 097376d29..b2745b2ae 100644 --- a/x/oracle/module.go +++ b/x/oracle/module.go @@ -4,9 +4,6 @@ import ( "context" "encoding/json" "fmt" - 
"math/big" - "sort" - "strings" // this line is used by starport scaffolding # 1 @@ -15,16 +12,13 @@ import ( "github.com/ExocoreNetwork/exocore/x/oracle/client/cli" "github.com/ExocoreNetwork/exocore/x/oracle/keeper" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" "github.com/ExocoreNetwork/exocore/x/oracle/types" abci "github.com/cometbft/cometbft/abci/types" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" ) var ( @@ -151,244 +145,8 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. To avoid wrong/empty versions, the initial version should be set to 1 func (AppModule) ConsensusVersion() uint64 { return 1 } -// BeginBlock contains the logic that is automatically triggered at the beginning of each block -func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { - // init caches and aggregatorContext for node restart - // TODO: try better way to init caches and aggregatorContext than beginBlock - _ = am.keeper.GetCaches() - agc := am.keeper.GetAggregatorContext(ctx) - validatorPowers := agc.GetValidatorPowers() - // set validatorReportInfo to track performance - for validator := range validatorPowers { - am.keeper.InitValidatorReportInfo(ctx, validator, ctx.BlockHeight()) - } -} - // EndBlock contains the logic that is automatically triggered at the end of each block func (am AppModule) EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { - cs := am.keeper.GetCaches() - validatorUpdates := am.keeper.GetValidatorUpdates(ctx) - forceSeal := false - agc := am.keeper.GetAggregatorContext(ctx) - - logger := am.keeper.Logger(ctx) - height := ctx.BlockHeight() - if len(validatorUpdates) > 0 { - validatorList := make(map[string]*big.Int) - for _, vu := range validatorUpdates { - pubKey, _ := cryptocodec.FromTmProtoPublicKey(vu.PubKey) - validatorStr := sdk.ConsAddress(pubKey.Address()).String() - validatorList[validatorStr] = big.NewInt(vu.Power) - // add possible new added validator info for slashing tracking - if vu.Power > 0 { - am.keeper.InitValidatorReportInfo(ctx, validatorStr, height) - } - } - // update validator set information in cache - cs.AddCache(cache.ItemV(validatorList)) - validatorPowers := make(map[string]*big.Int) - cs.GetCache(cache.ItemV(validatorPowers)) - // update validatorPowerList in aggregatorContext - agc.SetValidatorPowers(validatorPowers) - // TODO: seal all alive round since validatorSet changed here - forceSeal = true - logger.Info("validator set changed, force seal all active rounds", "height", height) - } - - // TODO: for v1 use mode==1, just check the failed feeders - _, failed, _, windowClosed := agc.SealRound(ctx, forceSeal) - defer func() { - logger.Debug("remove aggregators(workers) on window closed", "feederIDs", windowClosed) - for _, feederID := range windowClosed { - agc.RemoveWorker(feederID) - am.keeper.RemoveNonceWithFeederIDForValidators(ctx, feederID, agc.GetValidators()) - } - }() - // update&check slashing info - validatorPowers := agc.GetValidatorPowers() - validators := make([]string, 0, len(validatorPowers)) - for 
validator := range validatorPowers { - validators = append(validators, validator) - } - sort.Strings(validators) - for _, validator := range validators { - power := validatorPowers[validator] - reportedInfo, found := am.keeper.GetValidatorReportInfo(ctx, validator) - if !found { - logger.Error(fmt.Sprintf("Expected report info for validator %s but not found", validator)) - continue - } - // TODO: for the round calculation, now only sourceID=1 is used so {feederID, sourceID} have only one value for each feederID which corresponding to one round. - // But when we came to multiple sources, we should consider the round corresponding to feedeerID instead of {feederID, sourceID} - for _, finalPrice := range agc.GetFinalPriceListForFeederIDs(windowClosed) { - exist, matched := agc.PerformanceReview(ctx, finalPrice, validator) - if exist && !matched { - // TODO: malicious price, just slash&jail immediately - logger.Info( - "confirmed malicious price", - "validator", validator, - "infraction_height", height, - "infraction_time", ctx.BlockTime(), - "feederID", finalPrice.FeederID, - "detID", finalPrice.DetID, - "sourceID", finalPrice.SourceID, - "finalPrice", finalPrice.Price, - ) - consAddr, err := sdk.ConsAddressFromBech32(validator) - if err != nil { - panic("invalid consAddr string") - } - - operator := am.keeper.ValidatorByConsAddr(ctx, consAddr) - if operator != nil && !operator.IsJailed() { - coinsBurned := am.keeper.SlashWithInfractionReason(ctx, consAddr, height, power.Int64(), am.keeper.GetSlashFractionMalicious(ctx), stakingtypes.Infraction_INFRACTION_UNSPECIFIED) - ctx.EventManager().EmitEvent( - sdk.NewEvent( - types.EventTypeOracleSlash, - sdk.NewAttribute(types.AttributeKeyValidatorKey, validator), - sdk.NewAttribute(types.AttributeKeyPower, fmt.Sprintf("%d", power)), - sdk.NewAttribute(types.AttributeKeyReason, types.AttributeValueMaliciousReportPrice), - sdk.NewAttribute(types.AttributeKeyJailed, validator), - sdk.NewAttribute(types.AttributeKeyBurnedCoins, coinsBurned.String()), - ), - ) - am.keeper.Jail(ctx, consAddr) - jailUntil := ctx.BlockHeader().Time.Add(am.keeper.GetMaliciousJailDuration(ctx)) - am.keeper.JailUntil(ctx, consAddr, jailUntil) - reportedInfo.MissedRoundsCounter = 0 - reportedInfo.IndexOffset = 0 - am.keeper.ClearValidatorMissedRoundBitArray(ctx, validator) - } - continue - } - - reportedRoundsWindow := am.keeper.GetReportedRoundsWindow(ctx) - index := uint64(reportedInfo.IndexOffset % reportedRoundsWindow) - reportedInfo.IndexOffset++ - // Update reported round bit array & counter - // This counter just tracks the sum of the bit array - // That way we avoid needing to read/write the whole array each time - previous := am.keeper.GetValidatorMissedRoundBitArray(ctx, validator, index) - missed := !exist - switch { - case !previous && missed: - // Array value has changed from not missed to missed, increment counter - am.keeper.SetValidatorMissedRoundBitArray(ctx, validator, index, true) - reportedInfo.MissedRoundsCounter++ - case previous && !missed: - // Array value has changed from missed to not missed, decrement counter - am.keeper.SetValidatorMissedRoundBitArray(ctx, validator, index, false) - reportedInfo.MissedRoundsCounter-- - default: - // Array value at this index has not changed, no need to update counter - } - - minReportedPerWindow := am.keeper.GetMinReportedPerWindow(ctx) - - if missed { - ctx.EventManager().EmitEvent( - sdk.NewEvent( - types.EventTypeOracleLiveness, - sdk.NewAttribute(types.AttributeKeyValidatorKey, validator), - 
sdk.NewAttribute(types.AttributeKeyMissedRounds, fmt.Sprintf("%d", reportedInfo.MissedRoundsCounter)), - sdk.NewAttribute(types.AttributeKeyHeight, fmt.Sprintf("%d", height)), - ), - ) - - logger.Debug( - "absent validator", - "height", ctx.BlockHeight(), - "validator", validator, - "missed", reportedInfo.MissedRoundsCounter, - "threshold", minReportedPerWindow, - ) - } - - minHeight := reportedInfo.StartHeight + reportedRoundsWindow - maxMissed := reportedRoundsWindow - minReportedPerWindow - // if we are past the minimum height and the validator has missed too many rounds reporting prices, punish them - if height > minHeight && reportedInfo.MissedRoundsCounter > maxMissed { - consAddr, err := sdk.ConsAddressFromBech32(validator) - if err != nil { - panic("invalid consAddr string") - } - operator := am.keeper.ValidatorByConsAddr(ctx, consAddr) - if operator != nil && !operator.IsJailed() { - // missing rounds confirmed: slash and jail the validator - coinsBurned := am.keeper.SlashWithInfractionReason(ctx, consAddr, height, power.Int64(), am.keeper.GetSlashFractionMiss(ctx), stakingtypes.Infraction_INFRACTION_UNSPECIFIED) - ctx.EventManager().EmitEvent( - sdk.NewEvent( - types.EventTypeOracleSlash, - sdk.NewAttribute(types.AttributeKeyValidatorKey, validator), - sdk.NewAttribute(types.AttributeKeyPower, fmt.Sprintf("%d", power)), - sdk.NewAttribute(types.AttributeKeyReason, types.AttributeValueMissingReportPrice), - sdk.NewAttribute(types.AttributeKeyJailed, validator), - sdk.NewAttribute(types.AttributeKeyBurnedCoins, coinsBurned.String()), - ), - ) - am.keeper.Jail(ctx, consAddr) - jailUntil := ctx.BlockHeader().Time.Add(am.keeper.GetMissJailDuration(ctx)) - am.keeper.JailUntil(ctx, consAddr, jailUntil) - - // We need to reset the counter & array so that the validator won't be immediately slashed for miss report info upon rebonding. 
- reportedInfo.MissedRoundsCounter = 0 - reportedInfo.IndexOffset = 0 - am.keeper.ClearValidatorMissedRoundBitArray(ctx, validator) - - logger.Info( - "slashing and jailing validator due to liveness fault", - "height", height, - "validator", consAddr.String(), - "min_height", minHeight, - "threshold", minReportedPerWindow, - "slashed", am.keeper.GetSlashFractionMiss(ctx).String(), - "jailed_until", jailUntil, - ) - } else { - // validator was (a) not found or (b) already jailed so we do not slash - logger.Info( - "validator would have been slashed for too many missed repoerting price, but was either not found in store or already jailed", - "validator", validator, - ) - } - } - // Set the updated reportInfo - am.keeper.SetValidatorReportInfo(ctx, validator, reportedInfo) - } - } - - // append new round with previous price for fail-sealed token - for _, tokenID := range failed { - prevPrice, nextRoundID := am.keeper.GrowRoundID(ctx, tokenID) - logger.Info("add new round with previous price under fail aggregation", "tokenID", tokenID, "roundID", nextRoundID, "price", prevPrice) - } - - am.keeper.ResetAggregatorContextCheckTx() - - if _, _, paramsUpdated := cs.CommitCache(ctx, false, am.keeper); paramsUpdated { - var p cache.ItemP - cs.GetCache(&p) - params := types.Params(p) - agc.SetParams(¶ms) - ctx.EventManager().EmitEvent(sdk.NewEvent( - types.EventTypeCreatePrice, - sdk.NewAttribute(types.AttributeKeyParamsUpdated, types.AttributeValueParamsUpdatedSuccess), - )) - } - - if feederIDs := am.keeper.GetUpdatedFeederIDs(); len(feederIDs) > 0 { - feederIDsStr := strings.Join(feederIDs, "_") - ctx.EventManager().EmitEvent(sdk.NewEvent( - types.EventTypeCreatePrice, - sdk.NewAttribute(types.AttributeKeyPriceUpdated, types.AttributeValuePriceUpdatedSuccess), - sdk.NewAttribute(types.AttributeKeyFeederIDs, feederIDsStr), - )) - am.keeper.ResetUpdatedFeederIDs() - } - - newRoundFeederIDs := agc.PrepareRoundEndBlock(ctx, ctx.BlockHeight(), 0) - for _, feederID := range newRoundFeederIDs { - am.keeper.AddZeroNonceItemWithFeederIDForValidators(ctx, feederID, agc.GetValidators()) - } + am.keeper.EndBlock(ctx) return []abci.ValidatorUpdate{} } diff --git a/x/oracle/module_beginblock.go b/x/oracle/module_beginblock.go new file mode 100644 index 000000000..c010b6669 --- /dev/null +++ b/x/oracle/module_beginblock.go @@ -0,0 +1,13 @@ +//go:build !devmode + +package oracle + +import ( + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + am.keeper.BeginBlock(ctx) +} diff --git a/x/oracle/module_beginblock_devmode.go b/x/oracle/module_beginblock_devmode.go new file mode 100644 index 000000000..55cda7e7e --- /dev/null +++ b/x/oracle/module_beginblock_devmode.go @@ -0,0 +1,34 @@ +//go:build devmode + +package oracle + +import ( + "fmt" + + "github.com/ExocoreNetwork/exocore/x/oracle/keeper" + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement" + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + logger := am.keeper.Logger(ctx) + am.keeper.BeginBlock(ctx) + + logger.Info("start simulating recovery in BeginBlock", "height", ctx.BlockHeight()) + // check the result 
of recovery + f := recoveryFeederManagerOnNextBlock(ctx, am.keeper) + if ok := am.keeper.FeederManager.Equals(f); !ok { + panic(fmt.Sprintf("there's something wrong in the recovery logic of feedermanager, block:%d", ctx.BlockHeight())) + } +} + +func recoveryFeederManagerOnNextBlock(ctx sdk.Context, k keeper.Keeper) *feedermanagement.FeederManager { + f := feedermanagement.NewFeederManager(k) + recovered := f.BeginBlock(ctx) + if ctx.BlockHeight() > 1 && !recovered { + panic(fmt.Sprintf("failed to do recovery for feedermanager, block:%d", ctx.BlockHeight())) + } + return f +} diff --git a/x/oracle/types/errors.go b/x/oracle/types/errors.go index 32b08b394..efc2d124b 100644 --- a/x/oracle/types/errors.go +++ b/x/oracle/types/errors.go @@ -16,12 +16,14 @@ const ( updateNativeTokenVirtualPriceFail nstAssetNotSurpported invalidPageLimit + failedInAggregation + quoteRecorded ) // x/oracle module sentinel errors var ( ErrInvalidMsg = sdkerrors.Register(ModuleName, invalidMsg, "invalid input create price") - ErrPriceProposalIgnored = sdkerrors.Register(ModuleName, priceProposalIgnored, "price proposal ignored") + ErrPriceProposalIgnored = sdkerrors.Register(ModuleName, priceProposalIgnored, "quote is ignored") ErrPriceProposalFormatInvalid = sdkerrors.Register(ModuleName, priceProposalFormatInvalid, "price proposal message format invalid") ErrInvalidParams = sdkerrors.Register(ModuleName, invalidParams, "invalid params") ErrGetPriceAssetNotFound = sdkerrors.Register(ModuleName, getPriceFailedAssetNotFound, "get price failed for asset not found") @@ -29,4 +31,6 @@ var ( ErrUpdateNativeTokenVirtualPriceFail = sdkerrors.Register(ModuleName, updateNativeTokenVirtualPriceFail, "update native token balance change failed") ErrNSTAssetNotSupported = sdkerrors.Register(ModuleName, nstAssetNotSurpported, "nstAsset not supported") ErrInvalidPagination = sdkerrors.Register(ModuleName, invalidPageLimit, "params for pagination is invalid") + ErrFailedInAggregation = sdkerrors.Register(ModuleName, failedInAggregation, "failed in aggregation") + ErrQuoteRecorded = sdkerrors.Register(ModuleName, quoteRecorded, "quote recorded") ) diff --git a/x/oracle/types/genesis.pb.go b/x/oracle/types/genesis.pb.go index 5b2b80079..d1bccc0ac 100644 --- a/x/oracle/types/genesis.pb.go +++ b/x/oracle/types/genesis.pb.go @@ -169,6 +169,8 @@ type StakerInfosAssets struct { AssetId string `protobuf:"bytes,1,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"` // stakerInfos StakerInfos []*StakerInfo `protobuf:"bytes,2,rep,name=staker_infos,json=stakerInfos,proto3" json:"staker_infos,omitempty"` + // nst_version is the version of nst to track validator list changes + NstVersion int64 `protobuf:"varint,3,opt,name=nst_version,json=nstVersion,proto3" json:"nst_version,omitempty"` } func (m *StakerInfosAssets) Reset() { *m = StakerInfosAssets{} } @@ -218,12 +220,21 @@ func (m *StakerInfosAssets) GetStakerInfos() []*StakerInfo { return nil } +func (m *StakerInfosAssets) GetNstVersion() int64 { + if m != nil { + return m.NstVersion + } + return 0 +} + // stakerListAssets bond stakerList to their related assets id type StakerListAssets struct { // asset_id tells the assetid which the stakerList belong to AssetId string `protobuf:"bytes,1,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"` // stakerList StakerList *StakerList `protobuf:"bytes,2,opt,name=staker_list,json=stakerList,proto3" json:"staker_list,omitempty"` + // nst_version is the version of nst to track validator list changes + NstVersion int64 
`protobuf:"varint,3,opt,name=nst_version,json=nstVersion,proto3" json:"nst_version,omitempty"` } func (m *StakerListAssets) Reset() { *m = StakerListAssets{} } @@ -273,6 +284,13 @@ func (m *StakerListAssets) GetStakerList() *StakerList { return nil } +func (m *StakerListAssets) GetNstVersion() int64 { + if m != nil { + return m.NstVersion + } + return 0 +} + // ValidatorMissedRounds record missed rounds indexes for a validator which consAddr corresponding to the address type ValidatorMissedRounds struct { // address of validator @@ -394,51 +412,52 @@ func init() { func init() { proto.RegisterFile("exocore/oracle/v1/genesis.proto", fileDescriptor_6b68ac5b0c7f4305) } var fileDescriptor_6b68ac5b0c7f4305 = []byte{ - // 691 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0xdd, 0x6e, 0xd3, 0x30, - 0x14, 0xc7, 0xdb, 0x7d, 0xb4, 0xdb, 0xe9, 0x06, 0xad, 0xf7, 0x41, 0x36, 0xb1, 0x6c, 0x94, 0x01, - 0x93, 0x90, 0x52, 0x06, 0x17, 0x5c, 0x20, 0xa1, 0x31, 0x84, 0xd0, 0x06, 0x43, 0x28, 0xe3, 0x43, - 0x9a, 0x84, 0xa2, 0xb4, 0xf1, 0x3a, 0xab, 0x6d, 0x5c, 0xd9, 0x5e, 0x18, 0x6f, 0xc1, 0x8b, 0xf0, - 0x1e, 0xbb, 0xdc, 0x25, 0x57, 0x08, 0xad, 0x2f, 0x82, 0x72, 0x9c, 0xac, 0x49, 0x93, 0x96, 0xbb, - 0xd8, 0xe7, 0x7f, 0x7e, 0xe7, 0xf8, 0x7f, 0xec, 0xc0, 0x26, 0xbd, 0xe0, 0x2d, 0x2e, 0x68, 0x83, - 0x0b, 0xb7, 0xd5, 0xa5, 0x8d, 0x60, 0xb7, 0xd1, 0xa6, 0x3e, 0x95, 0x4c, 0x5a, 0x7d, 0xc1, 0x15, - 0x27, 0xb5, 0x48, 0x60, 0x69, 0x81, 0x15, 0xec, 0xae, 0xef, 0x64, 0x73, 0x98, 0xef, 0xd1, 0x0b, - 0x47, 0xd0, 0x16, 0xf5, 0x95, 0xd3, 0x93, 0x6d, 0x9d, 0xbc, 0xfe, 0xf8, 0x3f, 0xca, 0xbe, 0x2b, - 0xdc, 0x5e, 0x54, 0x69, 0x7d, 0x3b, 0x2b, 0xf6, 0x5d, 0xc5, 0x02, 0xea, 0x28, 0xde, 0xa1, 0x7e, - 0xa4, 0x32, 0xb3, 0xaa, 0x14, 0x25, 0x2f, 0x2e, 0x58, 0x8b, 0xc6, 0xf1, 0x7a, 0x36, 0x9e, 0x69, - 0xfb, 0xc1, 0x58, 0x4d, 0xaa, 0xd4, 0x56, 0x56, 0x26, 0xbb, 0xae, 0x3c, 0x63, 0x7e, 0x0c, 0xb2, - 0xb2, 0x8a, 0xc0, 0xed, 0x32, 0xcf, 0x55, 0x5c, 0x38, 0xe7, 0x7d, 0xcf, 0x55, 0xd4, 0x69, 0x76, - 0x79, 0xab, 0x13, 0xe9, 0x97, 0xdb, 0xbc, 0xcd, 0xf1, 0xb3, 0x11, 0x7e, 0xe9, 0xdd, 0xfa, 0xaf, - 0x32, 0x2c, 0xbc, 0xd5, 0x43, 0x39, 0x56, 0xae, 0xa2, 0xe4, 0x39, 0x94, 0x74, 0x23, 0x46, 0x71, - 0xab, 0xb8, 0x53, 0x79, 0xba, 0x66, 0x65, 0x86, 0x64, 0x7d, 0x44, 0xc1, 0xfe, 0xcc, 0xe5, 0x9f, - 0xcd, 0x82, 0x1d, 0xc9, 0xc9, 0x1e, 0x54, 0xb4, 0x19, 0x4e, 0x97, 0x49, 0x65, 0x4c, 0x6d, 0x4d, - 0x8f, 0xcb, 0x46, 0x55, 0x94, 0x0d, 0x3a, 0xe7, 0x3d, 0x93, 0x8a, 0x7c, 0x83, 0xd5, 0xfc, 0x13, - 0x18, 0xd3, 0xd8, 0xca, 0xa3, 0x1c, 0xd8, 0x97, 0x38, 0xe1, 0x33, 0xea, 0xf7, 0x43, 0xb9, 0xbd, - 0x1c, 0xe4, 0xec, 0x92, 0x4f, 0xb0, 0x94, 0x73, 0x41, 0x8c, 0x19, 0x64, 0x6f, 0xe7, 0xb0, 0x0f, - 0x42, 0xb5, 0x8d, 0x62, 0x7d, 0x62, 0xbb, 0xc6, 0x46, 0xb7, 0xc8, 0x3b, 0xa8, 0x8e, 0x5e, 0x50, - 0x63, 0x16, 0x91, 0xf7, 0x26, 0x23, 0x8f, 0x64, 0xdb, 0xbe, 0xc5, 0x52, 0x6b, 0x72, 0x08, 0xb7, - 0x87, 0x18, 0xed, 0x63, 0x09, 0x7d, 0xbc, 0x9b, 0xc3, 0xba, 0x49, 0x8b, 0xac, 0x5c, 0x14, 0xf1, - 0x06, 0xba, 0x79, 0x0c, 0x24, 0x75, 0x50, 0x8d, 0x2b, 0x23, 0x6e, 0x73, 0x2c, 0x2e, 0x35, 0xda, - 0xaa, 0x48, 0xec, 0x21, 0xf4, 0x04, 0x96, 0xa4, 0x72, 0x3b, 0x54, 0x38, 0xcc, 0x3f, 0xe5, 0xd2, - 0x71, 0xa5, 0xa4, 0x4a, 0x1a, 0x73, 0x48, 0xcd, 0xf3, 0xf0, 0x18, 0xd5, 0x07, 0xa1, 0xf8, 0x15, - 0x6a, 0x23, 0x74, 0x4d, 0x8e, 0x06, 0xc8, 0x57, 0x20, 0x11, 0x3b, 0xec, 0x34, 0x46, 0xcf, 0x23, - 0xfa, 0xfe, 0x58, 0x74, 0xd8, 0x56, 0x8a, 0x5c, 0x95, 0x23, 0xfb, 0xa4, 0x99, 0xbc, 0x57, 0x82, - 0xf6, 0xb9, 0x50, 0xba, 0x7d, 0x03, 
0x10, 0xfe, 0x70, 0xd2, 0xbd, 0xb2, 0x51, 0x1f, 0xf6, 0x19, - 0xf1, 0x87, 0x97, 0x6b, 0x18, 0x92, 0xe4, 0x14, 0xee, 0x0c, 0x6b, 0xf4, 0x98, 0x94, 0xd4, 0x73, - 0x04, 0x3f, 0xf7, 0x3d, 0x69, 0x54, 0xb0, 0xc8, 0xce, 0xa4, 0x22, 0x47, 0x98, 0x60, 0xa3, 0x3e, - 0x2a, 0xb3, 0x12, 0xe4, 0x05, 0xeb, 0x7d, 0xa8, 0x65, 0x2c, 0x25, 0x6b, 0x30, 0x87, 0x6e, 0x39, - 0xcc, 0xc3, 0x57, 0x3b, 0x6f, 0x97, 0x71, 0x7d, 0xe0, 0x91, 0x3d, 0x58, 0x48, 0x0e, 0x2c, 0x7a, - 0x96, 0x1b, 0x13, 0x27, 0x65, 0x57, 0x12, 0xc3, 0xa9, 0xf7, 0xa0, 0x3a, 0xea, 0xf4, 0xa4, 0x82, - 0x2f, 0xa1, 0x92, 0x98, 0xa2, 0x31, 0x85, 0x4f, 0x61, 0x63, 0xe2, 0xf8, 0x6c, 0x18, 0x8e, 0xac, - 0x1e, 0xc0, 0x4a, 0xae, 0x2d, 0xc4, 0x80, 0xb2, 0xeb, 0x79, 0x82, 0x4a, 0x79, 0x53, 0x52, 0x2f, - 0xc9, 0x6b, 0x58, 0x4c, 0x3b, 0xae, 0x0f, 0x69, 0xe6, 0x14, 0x4d, 0x10, 0xed, 0x85, 0x5e, 0xd2, - 0xd8, 0x17, 0x50, 0x49, 0x04, 0xc9, 0x32, 0xcc, 0xe2, 0xdb, 0xc4, 0x5a, 0x33, 0xb6, 0x5e, 0x90, - 0x55, 0x28, 0xe9, 0x24, 0x3c, 0xd7, 0x9c, 0x1d, 0xad, 0xf6, 0x0f, 0x2f, 0xaf, 0xcd, 0xe2, 0xd5, - 0xb5, 0x59, 0xfc, 0x7b, 0x6d, 0x16, 0x7f, 0x0e, 0xcc, 0xc2, 0xd5, 0xc0, 0x2c, 0xfc, 0x1e, 0x98, - 0x85, 0x93, 0x27, 0x6d, 0xa6, 0xce, 0xce, 0x9b, 0x56, 0x8b, 0xf7, 0x1a, 0x6f, 0x74, 0x3b, 0x1f, - 0xa8, 0xfa, 0xce, 0x45, 0xa7, 0x11, 0xff, 0xbf, 0x2f, 0xe2, 0x3f, 0xb8, 0xfa, 0xd1, 0xa7, 0xb2, - 0x59, 0xc2, 0x1f, 0xf3, 0xb3, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x3f, 0xd5, 0x91, 0x78, 0x3e, - 0x07, 0x00, 0x00, + // 715 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0xdd, 0x4e, 0x13, 0x41, + 0x14, 0x6e, 0xf9, 0x69, 0xe1, 0x14, 0xb4, 0x1d, 0x7e, 0x5c, 0x88, 0x6c, 0xb1, 0xa2, 0x92, 0x98, + 0xb4, 0xa2, 0x17, 0x5e, 0x98, 0x18, 0xc4, 0x18, 0x03, 0x8a, 0x31, 0x83, 0x62, 0x42, 0x62, 0x36, + 0xdb, 0xee, 0x50, 0x26, 0x6d, 0x77, 0x9a, 0x99, 0x61, 0xc5, 0x97, 0x30, 0xfa, 0x20, 0xbe, 0x07, + 0x97, 0x5c, 0x7a, 0x65, 0x0c, 0xbc, 0x88, 0xe9, 0x99, 0x2d, 0xdd, 0xed, 0x6e, 0xcb, 0x5d, 0xe7, + 0x9c, 0xef, 0x7c, 0xe7, 0x9b, 0xf3, 0xcd, 0xe9, 0x42, 0x99, 0x9d, 0x89, 0x86, 0x90, 0xac, 0x26, + 0xa4, 0xdb, 0x68, 0xb3, 0x5a, 0xb0, 0x55, 0x6b, 0x32, 0x9f, 0x29, 0xae, 0xaa, 0x5d, 0x29, 0xb4, + 0x20, 0xa5, 0x10, 0x50, 0x35, 0x80, 0x6a, 0xb0, 0xb5, 0xba, 0x99, 0xac, 0xe1, 0xbe, 0xc7, 0xce, + 0x1c, 0xc9, 0x1a, 0xcc, 0xd7, 0x4e, 0x47, 0x35, 0x4d, 0xf1, 0xea, 0xe3, 0x1b, 0x90, 0x5d, 0x57, + 0xba, 0x9d, 0xb0, 0xd3, 0xea, 0x46, 0x12, 0xec, 0xbb, 0x9a, 0x07, 0xcc, 0xd1, 0xa2, 0xc5, 0xfc, + 0x10, 0x65, 0x27, 0x51, 0x31, 0x96, 0xb4, 0xbc, 0xe4, 0x0d, 0xd6, 0xcf, 0x57, 0x92, 0xf9, 0x84, + 0xec, 0x07, 0x23, 0x31, 0xb1, 0x56, 0xeb, 0x49, 0x98, 0x6a, 0xbb, 0xea, 0x84, 0xfb, 0x7d, 0xa2, + 0x6a, 0x12, 0x11, 0xb8, 0x6d, 0xee, 0xb9, 0x5a, 0x48, 0xe7, 0xb4, 0xeb, 0xb9, 0x9a, 0x39, 0xf5, + 0xb6, 0x68, 0xb4, 0x42, 0xfc, 0x62, 0x53, 0x34, 0x05, 0xfe, 0xac, 0xf5, 0x7e, 0x99, 0x68, 0xe5, + 0x77, 0x1e, 0xe6, 0xde, 0x1a, 0x53, 0x0e, 0xb4, 0xab, 0x19, 0x79, 0x0e, 0x39, 0x23, 0xc4, 0xca, + 0xae, 0x67, 0x37, 0x0b, 0x4f, 0x57, 0xaa, 0x09, 0x93, 0xaa, 0x1f, 0x11, 0xb0, 0x33, 0x75, 0xfe, + 0xb7, 0x9c, 0xa1, 0x21, 0x9c, 0x6c, 0x43, 0xc1, 0x0c, 0xc3, 0x69, 0x73, 0xa5, 0xad, 0x89, 0xf5, + 0xc9, 0x51, 0xd5, 0x88, 0x0a, 0xab, 0xc1, 0xd4, 0xbc, 0xe7, 0x4a, 0x93, 0xaf, 0xb0, 0x9c, 0x7e, + 0x03, 0x6b, 0x12, 0xa5, 0x3c, 0x4a, 0x21, 0x3b, 0xec, 0x17, 0x7c, 0x46, 0xfc, 0x4e, 0x0f, 0x4e, + 0x17, 0x83, 0x94, 0x28, 0xf9, 0x04, 0x0b, 0x29, 0x0f, 0xc4, 0x9a, 0x42, 0xee, 0x8d, 0x14, 0xee, + 0xdd, 0x1e, 0x9a, 0x22, 0xd8, 0xdc, 0x98, 0x96, 0xf8, 0x70, 0x88, 0xbc, 0x83, 0xe2, 0xf0, 
0x03, + 0xb5, 0xa6, 0x91, 0xf2, 0xde, 0x78, 0xca, 0x7d, 0xd5, 0xa4, 0xb7, 0x78, 0xec, 0x4c, 0xf6, 0xe0, + 0xf6, 0x80, 0xc6, 0xcc, 0x31, 0x87, 0x73, 0xbc, 0x9b, 0xc2, 0x75, 0x5d, 0x16, 0x8e, 0x72, 0x5e, + 0xf6, 0x03, 0x38, 0xcd, 0x03, 0x20, 0xb1, 0x8b, 0x1a, 0xba, 0x3c, 0xd2, 0x95, 0x47, 0xd2, 0xc5, + 0xac, 0x2d, 0xca, 0x48, 0x0c, 0x49, 0x8f, 0x60, 0x41, 0x69, 0xb7, 0xc5, 0xa4, 0xc3, 0xfd, 0x63, + 0xa1, 0x1c, 0x57, 0x29, 0xa6, 0x95, 0x35, 0x83, 0xac, 0x69, 0x33, 0x3c, 0x40, 0xf4, 0x6e, 0x0f, + 0xfc, 0x0a, 0xb1, 0x21, 0x75, 0x49, 0x0d, 0x27, 0xc8, 0x17, 0x20, 0x21, 0x77, 0x4f, 0x69, 0x9f, + 0x7a, 0x16, 0xa9, 0xef, 0x8f, 0xa4, 0xee, 0xc9, 0x8a, 0x31, 0x17, 0xd5, 0x50, 0x9c, 0xd4, 0xa3, + 0xef, 0x4a, 0xb2, 0xae, 0x90, 0xda, 0xc8, 0xb7, 0x00, 0xc9, 0x1f, 0x8e, 0x7b, 0x57, 0x14, 0xf1, + 0x3d, 0x9d, 0x21, 0xff, 0xe0, 0x71, 0x0d, 0x52, 0x8a, 0x1c, 0xc3, 0x9d, 0x41, 0x8f, 0x0e, 0x57, + 0x8a, 0x79, 0x8e, 0x14, 0xa7, 0xbe, 0xa7, 0xac, 0x02, 0x36, 0xd9, 0x1c, 0xd7, 0x64, 0x1f, 0x0b, + 0x28, 0xe2, 0xc3, 0x36, 0x4b, 0x41, 0x5a, 0xb2, 0xf2, 0x2b, 0x0b, 0xa5, 0xc4, 0x4c, 0xc9, 0x0a, + 0xcc, 0xe0, 0xb8, 0x1c, 0xee, 0xe1, 0xda, 0xce, 0xd2, 0x3c, 0x9e, 0x77, 0x3d, 0xb2, 0x0d, 0x73, + 0x51, 0xc7, 0xc2, 0xbd, 0x5c, 0x1b, 0x6b, 0x15, 0x2d, 0x44, 0xdc, 0x21, 0x65, 0x28, 0xf8, 0x4a, + 0x3b, 0x01, 0x93, 0x8a, 0x0b, 0x1f, 0x77, 0x71, 0x92, 0x82, 0xaf, 0xf4, 0xa1, 0x89, 0x54, 0x7e, + 0x64, 0xa1, 0x38, 0x6c, 0xc6, 0x38, 0x49, 0x2f, 0xa1, 0x10, 0x31, 0xda, 0x9a, 0xc0, 0x6d, 0x59, + 0x1b, 0xeb, 0x30, 0x85, 0x81, 0xab, 0x37, 0x0b, 0x0a, 0x60, 0x29, 0x75, 0xb4, 0xc4, 0x82, 0xbc, + 0xeb, 0x79, 0x92, 0x29, 0x75, 0xad, 0xc9, 0x1c, 0xc9, 0x6b, 0x98, 0x8f, 0xbb, 0x66, 0xe6, 0x64, + 0xa7, 0xa8, 0x8a, 0x30, 0xd2, 0xb9, 0x4e, 0xd4, 0x9c, 0x17, 0x50, 0x88, 0x24, 0xc9, 0x22, 0x4c, + 0xe3, 0x7e, 0x63, 0xaf, 0x29, 0x6a, 0x0e, 0x64, 0x19, 0x72, 0xa6, 0x08, 0x2f, 0x3e, 0x43, 0xc3, + 0xd3, 0xce, 0xde, 0xf9, 0xa5, 0x9d, 0xbd, 0xb8, 0xb4, 0xb3, 0xff, 0x2e, 0xed, 0xec, 0xcf, 0x2b, + 0x3b, 0x73, 0x71, 0x65, 0x67, 0xfe, 0x5c, 0xd9, 0x99, 0xa3, 0x27, 0x4d, 0xae, 0x4f, 0x4e, 0xeb, + 0xd5, 0x86, 0xe8, 0xd4, 0xde, 0x18, 0x39, 0x1f, 0x98, 0xfe, 0x26, 0x64, 0xab, 0xd6, 0xff, 0x06, + 0x9c, 0xf5, 0xbf, 0x02, 0xfa, 0x7b, 0x97, 0xa9, 0x7a, 0x0e, 0xff, 0xdc, 0x9f, 0xfd, 0x0f, 0x00, + 0x00, 0xff, 0xff, 0x41, 0x47, 0xc5, 0x22, 0x82, 0x07, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { @@ -628,6 +647,11 @@ func (m *StakerInfosAssets) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.NstVersion != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.NstVersion)) + i-- + dAtA[i] = 0x18 + } if len(m.StakerInfos) > 0 { for iNdEx := len(m.StakerInfos) - 1; iNdEx >= 0; iNdEx-- { { @@ -672,6 +696,11 @@ func (m *StakerListAssets) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.NstVersion != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.NstVersion)) + i-- + dAtA[i] = 0x18 + } if m.StakerList != nil { { size, err := m.StakerList.MarshalToSizedBuffer(dAtA[:i]) @@ -868,6 +897,9 @@ func (m *StakerInfosAssets) Size() (n int) { n += 1 + l + sovGenesis(uint64(l)) } } + if m.NstVersion != 0 { + n += 1 + sovGenesis(uint64(m.NstVersion)) + } return n } @@ -885,6 +917,9 @@ func (m *StakerListAssets) Size() (n int) { l = m.StakerList.Size() n += 1 + l + sovGenesis(uint64(l)) } + if m.NstVersion != 0 { + n += 1 + sovGenesis(uint64(m.NstVersion)) + } return n } @@ -1452,6 +1487,25 @@ func (m *StakerInfosAssets) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NstVersion", wireType) + } + m.NstVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NstVersion |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenesis(dAtA[iNdEx:]) @@ -1570,6 +1624,25 @@ func (m *StakerListAssets) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NstVersion", wireType) + } + m.NstVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NstVersion |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenesis(dAtA[iNdEx:]) diff --git a/x/oracle/types/genesis_test.go b/x/oracle/types/genesis_test.go index bf73106b9..c0f551a7c 100644 --- a/x/oracle/types/genesis_test.go +++ b/x/oracle/types/genesis_test.go @@ -62,7 +62,6 @@ func TestGenesisState_Validate(t *testing.T) { MinReportedPerWindow: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(2)), OracleMissJailDuration: 600 * time.Second, OracleMaliciousJailDuration: 30 * 24 * time.Hour, - SlashFractionMiss: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(20)), SlashFractionMalicious: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(10)), }, }, diff --git a/x/oracle/types/key_native_token.go b/x/oracle/types/key_native_token.go index 9c7797cf0..0d444f741 100644 --- a/x/oracle/types/key_native_token.go +++ b/x/oracle/types/key_native_token.go @@ -8,6 +8,7 @@ const ( NativeTokenPriceKeyPrefix = NativeTokenKeyPrefix + "price/value/" NativeTokenStakerInfoKeyPrefix = NativeTokenKeyPrefix + "stakerInfo/value/" NativeTokenStakerListKeyPrefix = NativeTokenKeyPrefix + "stakerList/value/" + NativeTokenVersionKeyPrefix = NativeTokenKeyPrefix + "version/" ) // NativeTokenStakerKeyPrefix returns the prefix for stakerInfo key @@ -41,3 +42,7 @@ func ParseNativeTokenStakerKey(key []byte) (assetID, stakerAddr string) { } return parsed[0], parsed[1] } + +func NativeTokenVersionKey(assetID string) []byte { + return append([]byte(NativeTokenVersionKeyPrefix), []byte(assetID)...) +} diff --git a/x/oracle/types/key_slashing.go b/x/oracle/types/key_slashing.go index ec5f13ee2..893a83ccb 100644 --- a/x/oracle/types/key_slashing.go +++ b/x/oracle/types/key_slashing.go @@ -10,11 +10,15 @@ func SlashingValidatorReportInfoKey(validator string) []byte { return append(ValidatorReportInfoPrefix, []byte(validator)...) } -func SlashingMissedBitArrayPrefix(validator string) []byte { +func SlashingMissedBitArrayPrefix() []byte { + return MissedBitArrayPrefix +} + +func SlashingMissedBitArrayValidatorPrefix(validator string) []byte { key := append([]byte(validator), DelimiterForCombinedKey) return append(MissedBitArrayPrefix, key...) } func SlashingMissedBitArrayKey(validator string, index uint64) []byte { - return append(SlashingMissedBitArrayPrefix(validator), Uint64Bytes(index)...) + return append(SlashingMissedBitArrayValidatorPrefix(validator), Uint64Bytes(index)...) 
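+ // Illustrative note: the combined key is presumably laid out as MissedBitArrayPrefix | validator | DelimiterForCombinedKey | Uint64Bytes(index), giving one entry per round slot of the reporting window.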
} diff --git a/x/oracle/types/params.go b/x/oracle/types/params.go index a174fdd42..194cd7534 100644 --- a/x/oracle/types/params.go +++ b/x/oracle/types/params.go @@ -2,6 +2,7 @@ package types import ( "errors" + "fmt" "strings" "time" @@ -83,7 +84,6 @@ func DefaultParams() Params { MinReportedPerWindow: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(2)), OracleMissJailDuration: 600 * time.Second, OracleMaliciousJailDuration: 30 * 24 * time.Hour, - SlashFractionMiss: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(20)), SlashFractionMalicious: sdkmath.LegacyNewDec(1).Quo(sdkmath.LegacyNewDec(10)), }, } @@ -124,9 +124,6 @@ func (p Params) Validate() error { if slashing.MinReportedPerWindow.GT(oneDec) || !slashing.MinReportedPerWindow.IsPositive() { return ErrInvalidParams.Wrapf("MinReportedPerWindow must be in (0, 1], got %v", slashing.MinReportedPerWindow) } - if slashing.SlashFractionMiss.GT(oneDec) || !slashing.SlashFractionMiss.IsPositive() { - return ErrInvalidParams.Wrapf("SlashFractionMiss must be in (0, 1], got %v", slashing.SlashFractionMiss) - } if slashing.SlashFractionMalicious.GT(oneDec) || !slashing.SlashFractionMalicious.IsPositive() { return ErrInvalidParams.Wrapf("SlashFractionMalicious must be in (0, 1], got %v", slashing.SlashFractionMalicious) } @@ -294,6 +291,7 @@ func (p Params) UpdateTokens(currentHeight uint64, tokens ...*Token) (Params, er if len(t.AssetID) > 0 { token.AssetID = t.AssetID } + // #nosec G115 - tokenID is actually uint since it's index of array if !p.TokenStarted(uint64(tokenID), currentHeight) { // contractAddres is mainly used as a description information if len(t.ContractAddress) > 0 { @@ -504,6 +502,18 @@ func (p Params) GetTokenIDFromAssetID(assetID string) int { return 0 } +func (p Params) GetAssetIDForNSTFromTokenID(tokenID uint64) string { + assetIDs := p.GetAssetIDsFromTokenID(tokenID) + for _, assetID := range assetIDs { + if nstChain, ok := strings.CutPrefix(strings.ToLower(assetID), NSTIDPrefix); ok { + if NSTChain, ok := NSTChainsInverted[nstChain]; ok { + return fmt.Sprintf("%s_%s", NSTAssetAddr[NSTChain], nstChain) + } + } + } + return "" +} + func (p Params) GetAssetIDsFromTokenID(tokenID uint64) []string { if tokenID >= uint64(len(p.Tokens)) { return nil @@ -525,7 +535,8 @@ func (p Params) IsValidSource(sourceID uint64) bool { func (p Params) GetTokenFeeder(feederID uint64) *TokenFeeder { for k, v := range p.TokenFeeders { - if uint64(k) == feederID { + // #nosec G115 // index of array is uint + if k >= 0 && uint64(k) == feederID { return v } } @@ -534,6 +545,7 @@ func (p Params) GetTokenFeeder(feederID uint64) *TokenFeeder { func (p Params) GetTokenInfo(feederID uint64) *Token { for k, v := range p.TokenFeeders { + // #nosec G115 // index of arry is uint if uint64(k) == feederID { return p.Tokens[v.TokenID] } @@ -559,6 +571,7 @@ func (p Params) CheckRules(feederID uint64, prices []*PriceSource) (bool, error) if source.Valid { notFound = true for _, p := range prices { + // #nosec G115 // index of array is uint if p.SourceID == uint64(sID) { notFound = false break @@ -594,3 +607,28 @@ func (p Params) CheckDecimal(feederID uint64, decimal int32) bool { token := p.Tokens[feeder.TokenID] return token.Decimal == decimal } + +func (p Params) IsForceSealingUpdate(params *Params) bool { + if params == nil { + return false + } + if p.MaxNonce != params.MaxNonce || + p.MaxDetId != params.MaxDetId || + p.ThresholdA != params.ThresholdA || + p.ThresholdB != params.ThresholdB || + p.Mode != params.Mode { + return true + } + return false +} + 
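+// IsSlashingResetUpdate reports whether the incoming params change the slashing window configuration (ReportedRoundsWindow or MinReportedPerWindow); callers presumably use it to decide when per-validator report info and missed-round bit arrays need to be reset.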
+func (p Params) IsSlashingResetUpdate(params *Params) bool { + if params == nil || params.Slashing == nil { + return false + } + if p.Slashing.ReportedRoundsWindow != params.Slashing.ReportedRoundsWindow || + p.Slashing.MinReportedPerWindow != params.Slashing.MinReportedPerWindow { + return true + } + return false +} diff --git a/x/oracle/types/params.pb.go b/x/oracle/types/params.pb.go index bde782f63..369003a8c 100644 --- a/x/oracle/types/params.pb.go +++ b/x/oracle/types/params.pb.go @@ -212,10 +212,8 @@ type SlashingParams struct { OracleMissJailDuration time.Duration `protobuf:"bytes,3,opt,name=oracle_miss_jail_duration,json=oracleMissJailDuration,proto3,stdduration" json:"oracle_miss_jail_duration"` // oracle_malicious_jail_duration defines the duratin one validator should be jailed for malicious behavior OracleMaliciousJailDuration time.Duration `protobuf:"bytes,4,opt,name=oracle_malicious_jail_duration,json=oracleMaliciousJailDuration,proto3,stdduration" json:"oracle_malicious_jail_duration"` - // slash_fraction_miss defines the fraction one validator should be punished for msissing reporting price - SlashFractionMiss github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,5,opt,name=slash_fraction_miss,json=slashFractionMiss,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"slash_fraction_miss"` // slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior - SlashFractionMalicious github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,6,opt,name=slash_fraction_malicious,json=slashFractionMalicious,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"slash_fraction_malicious"` + SlashFractionMalicious github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,5,opt,name=slash_fraction_malicious,json=slashFractionMalicious,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"slash_fraction_malicious"` } func (m *SlashingParams) Reset() { *m = SlashingParams{} } @@ -281,57 +279,56 @@ func init() { func init() { proto.RegisterFile("exocore/oracle/v1/params.proto", fileDescriptor_72f39bba4594b794) } var fileDescriptor_72f39bba4594b794 = []byte{ - // 791 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4d, 0x4f, 0xeb, 0x46, - 0x14, 0x8d, 0x5f, 0x3e, 0x1e, 0x6f, 0x42, 0x5e, 0x5f, 0x06, 0x4a, 0x87, 0x40, 0x1d, 0x17, 0x55, - 0x28, 0x42, 0xaa, 0x0d, 0x84, 0x55, 0xd5, 0x2e, 0xc8, 0x07, 0x12, 0x48, 0x84, 0xc8, 0x29, 0xaa, - 0xd4, 0x8d, 0xe5, 0xd8, 0x93, 0x64, 0xc0, 0xf6, 0x44, 0x33, 0x36, 0xa4, 0x6c, 0xbb, 0xa9, 0x58, - 0x75, 0xc9, 0x06, 0xa9, 0x52, 0x37, 0x5d, 0x76, 0xdf, 0x3f, 0xc0, 0x92, 0x65, 0xc5, 0x82, 0x56, - 0xb0, 0xe8, 0xdf, 0xa8, 0x3c, 0xb6, 0x53, 0x12, 0xc2, 0xe2, 0xb1, 0x49, 0x3c, 0xf7, 0x9c, 0x73, - 0xcf, 0xbd, 0x77, 0x7c, 0x0d, 0x64, 0x3c, 0xa2, 0x16, 0x65, 0x58, 0xa3, 0xcc, 0xb4, 0x1c, 0xac, - 0x9d, 0x6d, 0x69, 0x43, 0x93, 0x99, 0x2e, 0x57, 0x87, 0x8c, 0xfa, 0x14, 0x16, 0x63, 0x5c, 0x8d, - 0x70, 0xf5, 0x6c, 0xab, 0x54, 0x34, 0x5d, 0xe2, 0x51, 0x4d, 0xfc, 0x46, 0xac, 0xd2, 0xea, 0xf3, - 0x2c, 0xc4, 0xeb, 0x25, 0xe8, 0x97, 0xcf, 0x51, 0x9f, 0x9e, 0x62, 0xcf, 0xe8, 0x61, 0x6c, 0x63, - 0x16, 0xb3, 0x16, 0xfb, 0xb4, 0x4f, 0xc5, 0xa3, 0x16, 0x3e, 0xc5, 0x51, 0xb9, 0x4f, 0x69, 0xdf, - 0xc1, 0x9a, 0x38, 0x75, 0x83, 0x9e, 0x66, 0x07, 0xcc, 0xf4, 0x09, 0xf5, 0x22, 0x7c, 0xed, 0xcf, - 0x0c, 0xc8, 0xb5, 0x45, 0xc1, 0x70, 0x13, 0xe4, 0xac, 0x81, 0x49, 0x3c, 0x8e, 0x24, 0x25, 0x5d, - 0xc9, 0x6f, 0x23, 0xf5, 0x59, 0xed, 0x6a, 0x3d, 0x24, 0xe8, 0x31, 
0x2f, 0x54, 0x88, 0x42, 0x38, - 0x7a, 0xf3, 0xa2, 0xe2, 0xbb, 0x90, 0xa0, 0xc7, 0x3c, 0x58, 0x05, 0x6f, 0x39, 0x0d, 0x98, 0x85, - 0x39, 0x4a, 0x0b, 0xc9, 0xf2, 0x0c, 0x49, 0x47, 0x30, 0xf4, 0x84, 0x09, 0xab, 0x20, 0xcb, 0x02, - 0x07, 0x73, 0x94, 0x11, 0x92, 0xcf, 0x67, 0x48, 0xf4, 0xc0, 0xc1, 0xb1, 0x2c, 0xe2, 0xc2, 0x3a, - 0x28, 0x3c, 0x1d, 0x12, 0x47, 0x59, 0x21, 0x96, 0x5f, 0x2a, 0x71, 0x4f, 0xd0, 0xf4, 0x79, 0xff, - 0xff, 0x03, 0x87, 0x2b, 0xe0, 0x9d, 0x6b, 0x8e, 0x0c, 0x8f, 0x7a, 0x16, 0x46, 0x39, 0x45, 0xaa, - 0x64, 0xf5, 0x39, 0xd7, 0x1c, 0xb5, 0xc2, 0x33, 0x2c, 0x83, 0xbc, 0x3f, 0x60, 0x98, 0x0f, 0xa8, - 0x63, 0x1b, 0x26, 0x7a, 0x2b, 0x60, 0x30, 0x0e, 0xed, 0x4e, 0x12, 0xba, 0x68, 0x6e, 0x8a, 0x50, - 0x83, 0x3b, 0x20, 0xe3, 0x52, 0x1b, 0xa3, 0x77, 0x8a, 0x54, 0x79, 0xbf, 0xad, 0xcc, 0x9a, 0x37, - 0xf5, 0x38, 0xf6, 0x78, 0xc0, 0x0f, 0xa9, 0x8d, 0x75, 0xc1, 0x86, 0xab, 0x00, 0x84, 0x45, 0xd9, - 0xd8, 0x37, 0x88, 0x8d, 0xc0, 0xb8, 0xaa, 0x06, 0xf6, 0xf7, 0x6d, 0xb8, 0x0e, 0x3e, 0x09, 0x51, - 0x4e, 0x2e, 0xb0, 0x31, 0x64, 0x24, 0x9c, 0x74, 0x5e, 0x50, 0x0a, 0xae, 0x39, 0xea, 0x90, 0x0b, - 0xdc, 0x16, 0x41, 0xf8, 0x2d, 0x98, 0xe3, 0x8e, 0xc9, 0x07, 0xc4, 0xeb, 0xa3, 0x79, 0x45, 0xaa, - 0xe4, 0xb7, 0xbf, 0x98, 0x75, 0x15, 0x31, 0x25, 0x7a, 0x45, 0xf4, 0xb1, 0xe4, 0xeb, 0xcc, 0xd5, - 0xaf, 0xe5, 0xd4, 0xda, 0x5d, 0x06, 0xbc, 0x9f, 0xa4, 0xc0, 0x1d, 0xb0, 0xc4, 0xf0, 0x90, 0x32, - 0x1f, 0xdb, 0x06, 0xa3, 0x81, 0x67, 0x73, 0xe3, 0x9c, 0x78, 0x36, 0x3d, 0x47, 0x92, 0x22, 0x55, - 0xd2, 0xfa, 0x62, 0x82, 0xea, 0x02, 0xfc, 0x5e, 0x60, 0xf0, 0x04, 0x7c, 0xe6, 0x12, 0xcf, 0x18, - 0x2b, 0x87, 0x98, 0x25, 0xb2, 0x37, 0x8a, 0x54, 0x99, 0xaf, 0x55, 0x6f, 0xee, 0xcb, 0xa9, 0xbb, - 0xfb, 0xf2, 0x7a, 0x9f, 0xf8, 0x83, 0xa0, 0xab, 0x5a, 0xd4, 0xd5, 0x2c, 0xca, 0x5d, 0xca, 0xe3, - 0xbf, 0xaf, 0xb8, 0x7d, 0xaa, 0xf9, 0x3f, 0x0e, 0x31, 0x57, 0x1b, 0xd8, 0xfa, 0xfd, 0xdf, 0x3f, - 0x36, 0x24, 0x7d, 0xd1, 0x25, 0x9e, 0x1e, 0xa7, 0x6c, 0x63, 0x16, 0x7b, 0x59, 0x60, 0x39, 0x6a, - 0xd0, 0x70, 0x09, 0xe7, 0xc6, 0x89, 0x49, 0x1c, 0x23, 0xd9, 0x0a, 0x94, 0x16, 0xa3, 0x58, 0x56, - 0xa3, 0xb5, 0x51, 0x93, 0xb5, 0x51, 0x1b, 0x31, 0xa1, 0x56, 0x08, 0x0b, 0xb9, 0xfa, 0xbb, 0x2c, - 0x45, 0x16, 0x4b, 0x51, 0xaa, 0x43, 0xc2, 0xf9, 0x81, 0x49, 0x9c, 0x84, 0x06, 0x5d, 0x20, 0x27, - 0x26, 0xa6, 0x43, 0x2c, 0x42, 0x83, 0x69, 0xa7, 0xcc, 0x47, 0x3a, 0xad, 0xc4, 0x4e, 0x49, 0xba, - 0x09, 0x3b, 0x0b, 0x2c, 0x88, 0xab, 0x31, 0x7a, 0xcc, 0xb4, 0xc2, 0x88, 0xe8, 0x0d, 0x65, 0x5f, - 0x3f, 0xbb, 0xa2, 0xc8, 0xb7, 0x17, 0xa7, 0x0b, 0xfb, 0x83, 0x2e, 0x40, 0xd3, 0x26, 0x49, 0x31, - 0x62, 0x39, 0x5e, 0xe9, 0xb4, 0x34, 0xe9, 0x94, 0xa4, 0xdc, 0xf8, 0x49, 0x02, 0x85, 0x89, 0xf7, - 0x1f, 0x7e, 0x03, 0x4a, 0xf5, 0xa3, 0x56, 0xa7, 0xd9, 0xea, 0x1c, 0x77, 0x8c, 0xc3, 0xa3, 0x46, - 0xd3, 0x38, 0x6e, 0x75, 0xda, 0xcd, 0xfa, 0xfe, 0xde, 0x7e, 0xb3, 0xf1, 0x21, 0x55, 0x5a, 0xbd, - 0xbc, 0x56, 0xd0, 0x84, 0xe4, 0xd8, 0xe3, 0x43, 0x6c, 0x91, 0x1e, 0xc1, 0x36, 0x54, 0xc1, 0xc2, - 0x94, 0x7a, 0xb7, 0xb3, 0xdb, 0xfe, 0x20, 0x95, 0x3e, 0xbd, 0xbc, 0x56, 0x8a, 0x13, 0xb2, 0x10, - 0x28, 0x65, 0x7e, 0xfe, 0x4d, 0x4e, 0xd5, 0x0e, 0x6e, 0x1e, 0x64, 0xe9, 0xf6, 0x41, 0x96, 0xfe, - 0x79, 0x90, 0xa5, 0x5f, 0x1e, 0xe5, 0xd4, 0xed, 0xa3, 0x9c, 0xfa, 0xeb, 0x51, 0x4e, 0xfd, 0xb0, - 0xf9, 0xa4, 0xc9, 0x66, 0xb4, 0x39, 0x2d, 0xec, 0x9f, 0x53, 0x76, 0xaa, 0x25, 0x1f, 0xec, 0x51, - 0xf2, 0xc9, 0x16, 0x2d, 0x77, 0x73, 0xe2, 0x92, 0xab, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x58, - 0x98, 0x95, 0x8e, 0x35, 0x06, 0x00, 0x00, + // 772 bytes of a gzipped FileDescriptorProto + 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xbf, 0x4f, 0xc3, 0x46, + 0x14, 0xc7, 0x63, 0xf2, 0x03, 0xb8, 0x10, 0x0a, 0x57, 0x4a, 0x8f, 0x40, 0x1d, 0x17, 0x55, 0x28, + 0x42, 0xaa, 0x0d, 0x84, 0xa9, 0x6a, 0x07, 0xf2, 0x03, 0x09, 0x24, 0x42, 0xe4, 0x14, 0x55, 0xea, + 0x62, 0x39, 0xf6, 0x25, 0x39, 0xb0, 0x7d, 0xd1, 0x9d, 0x0d, 0x29, 0x6b, 0x97, 0x8a, 0xa9, 0x23, + 0x0b, 0x52, 0xa5, 0x2e, 0x1d, 0xbb, 0xf7, 0x1f, 0x60, 0x64, 0xac, 0x3a, 0xd0, 0x0a, 0x86, 0xfe, + 0x05, 0xdd, 0x2b, 0x9f, 0xed, 0x40, 0x42, 0x18, 0xda, 0x25, 0xf1, 0xbd, 0xef, 0xe7, 0xfb, 0xde, + 0xbb, 0xe7, 0x3b, 0x03, 0x19, 0x0f, 0xa9, 0x45, 0x19, 0xd6, 0x28, 0x33, 0x2d, 0x07, 0x6b, 0x97, + 0xbb, 0xda, 0xc0, 0x64, 0xa6, 0xcb, 0xd5, 0x01, 0xa3, 0x3e, 0x85, 0xcb, 0xb1, 0xae, 0x46, 0xba, + 0x7a, 0xb9, 0x5b, 0x5c, 0x36, 0x5d, 0xe2, 0x51, 0x4d, 0xfc, 0x46, 0x54, 0x71, 0xe3, 0x6d, 0x16, + 0xe2, 0x75, 0x13, 0xf5, 0xb3, 0xb7, 0xaa, 0x4f, 0x2f, 0xb0, 0x67, 0x74, 0x31, 0xb6, 0x31, 0x8b, + 0xa9, 0x95, 0x1e, 0xed, 0x51, 0xf1, 0xa8, 0x85, 0x4f, 0x71, 0x54, 0xee, 0x51, 0xda, 0x73, 0xb0, + 0x26, 0x56, 0x9d, 0xa0, 0xab, 0xd9, 0x01, 0x33, 0x7d, 0x42, 0xbd, 0x48, 0xdf, 0xfc, 0x2d, 0x03, + 0x72, 0x2d, 0xd1, 0x30, 0xdc, 0x01, 0x39, 0xab, 0x6f, 0x12, 0x8f, 0x23, 0x49, 0x49, 0x97, 0xf3, + 0x7b, 0x48, 0x7d, 0xd3, 0xbb, 0x5a, 0x0b, 0x01, 0x3d, 0xe6, 0x42, 0x87, 0x68, 0x84, 0xa3, 0x99, + 0x77, 0x1d, 0x5f, 0x87, 0x80, 0x1e, 0x73, 0xb0, 0x02, 0x66, 0x39, 0x0d, 0x98, 0x85, 0x39, 0x4a, + 0x0b, 0xcb, 0xda, 0x14, 0x4b, 0x5b, 0x10, 0x7a, 0x42, 0xc2, 0x0a, 0xc8, 0xb2, 0xc0, 0xc1, 0x1c, + 0x65, 0x84, 0xe5, 0x93, 0x29, 0x16, 0x3d, 0x70, 0x70, 0x6c, 0x8b, 0x58, 0x58, 0x03, 0x85, 0xd7, + 0x43, 0xe2, 0x28, 0x2b, 0xcc, 0xf2, 0x7b, 0x2d, 0x1e, 0x0a, 0x4c, 0x5f, 0xf0, 0x5f, 0x16, 0x1c, + 0xae, 0x83, 0x79, 0xd7, 0x1c, 0x1a, 0x1e, 0xf5, 0x2c, 0x8c, 0x72, 0x8a, 0x54, 0xce, 0xea, 0x73, + 0xae, 0x39, 0x6c, 0x86, 0x6b, 0x58, 0x02, 0x79, 0xbf, 0xcf, 0x30, 0xef, 0x53, 0xc7, 0x36, 0x4c, + 0x34, 0x2b, 0x64, 0x30, 0x0a, 0x1d, 0x8c, 0x03, 0x1d, 0x34, 0x37, 0x01, 0x54, 0xe1, 0x3e, 0xc8, + 0xb8, 0xd4, 0xc6, 0x68, 0x5e, 0x91, 0xca, 0x8b, 0x7b, 0xca, 0xb4, 0x79, 0x53, 0x8f, 0x63, 0x8f, + 0x07, 0xfc, 0x84, 0xda, 0x58, 0x17, 0x34, 0xdc, 0x00, 0x20, 0x6c, 0xca, 0xc6, 0xbe, 0x41, 0x6c, + 0x04, 0x46, 0x5d, 0xd5, 0xb1, 0x7f, 0x64, 0xc3, 0x2d, 0xf0, 0x41, 0xa8, 0x72, 0x72, 0x8d, 0x8d, + 0x01, 0x23, 0xe1, 0xa4, 0xf3, 0x02, 0x29, 0xb8, 0xe6, 0xb0, 0x4d, 0xae, 0x71, 0x4b, 0x04, 0xe1, + 0x57, 0x60, 0x8e, 0x3b, 0x26, 0xef, 0x13, 0xaf, 0x87, 0x16, 0x14, 0xa9, 0x9c, 0xdf, 0xfb, 0x74, + 0xda, 0xab, 0x88, 0x91, 0xe8, 0x88, 0xe8, 0x23, 0xcb, 0x17, 0x99, 0xdb, 0x9f, 0x4a, 0xa9, 0xcd, + 0x7f, 0xd2, 0x60, 0x71, 0x1c, 0x81, 0xfb, 0x60, 0x95, 0xe1, 0x01, 0x65, 0x3e, 0xb6, 0x0d, 0x46, + 0x03, 0xcf, 0xe6, 0xc6, 0x15, 0xf1, 0x6c, 0x7a, 0x85, 0x24, 0x45, 0x2a, 0xa7, 0xf5, 0x95, 0x44, + 0xd5, 0x85, 0xf8, 0x8d, 0xd0, 0xe0, 0x39, 0xf8, 0xd8, 0x25, 0x9e, 0x31, 0x72, 0x0e, 0x30, 0x4b, + 0x6c, 0x33, 0x8a, 0x54, 0x5e, 0xa8, 0x56, 0xee, 0x1f, 0x4b, 0xa9, 0x3f, 0x1e, 0x4b, 0x5b, 0x3d, + 0xe2, 0xf7, 0x83, 0x8e, 0x6a, 0x51, 0x57, 0xb3, 0x28, 0x77, 0x29, 0x8f, 0xff, 0x3e, 0xe7, 0xf6, + 0x85, 0xe6, 0x7f, 0x37, 0xc0, 0x5c, 0xad, 0x63, 0xeb, 0x97, 0xbf, 0x7f, 0xdd, 0x96, 0xf4, 0x15, + 0x97, 0x78, 0x7a, 0x9c, 0xb2, 0x85, 0x59, 0x5c, 0xcb, 0x02, 0x6b, 0xd1, 0x06, 0x0d, 0x97, 0x70, + 0x6e, 0x9c, 0x9b, 0xc4, 0x31, 0x92, 0x5b, 0x81, 0xd2, 0x62, 0x14, 0x6b, 0x6a, 0x74, 0x6d, 0xd4, + 0xe4, 0xda, 0xa8, 0xf5, 0x18, 0xa8, 0x16, 0xc2, 0x46, 0x6e, 0xff, 0x2c, 0x49, 0x51, 0x89, 0xd5, + 0x28, 0xd5, 0x09, 0xe1, 
0xfc, 0xd8, 0x24, 0x4e, 0x82, 0x41, 0x17, 0xc8, 0x49, 0x11, 0xd3, 0x21, + 0x16, 0xa1, 0xc1, 0x64, 0xa5, 0xcc, 0x7f, 0xac, 0xb4, 0x1e, 0x57, 0x4a, 0xd2, 0x4d, 0x94, 0x43, + 0xe2, 0xd5, 0x18, 0x5d, 0x66, 0x5a, 0x61, 0xe4, 0xa5, 0x2c, 0xca, 0xfe, 0xff, 0x01, 0xae, 0x8a, + 0xa4, 0x87, 0x71, 0xce, 0x51, 0xe9, 0xed, 0xef, 0x25, 0x50, 0x18, 0x3b, 0x9a, 0xf0, 0x4b, 0x50, + 0xac, 0x9d, 0x36, 0xdb, 0x8d, 0x66, 0xfb, 0xac, 0x6d, 0x9c, 0x9c, 0xd6, 0x1b, 0xc6, 0x59, 0xb3, + 0xdd, 0x6a, 0xd4, 0x8e, 0x0e, 0x8f, 0x1a, 0xf5, 0xa5, 0x54, 0x71, 0xe3, 0xe6, 0x4e, 0x41, 0x63, + 0x96, 0x33, 0x8f, 0x0f, 0xb0, 0x45, 0xba, 0x04, 0xdb, 0x50, 0x05, 0x1f, 0x4e, 0xb8, 0x0f, 0xda, + 0x07, 0xad, 0x25, 0xa9, 0xf8, 0xd1, 0xcd, 0x9d, 0xb2, 0x3c, 0x66, 0x0b, 0x85, 0x62, 0xe6, 0x87, + 0x9f, 0xe5, 0x54, 0xf5, 0xf8, 0xfe, 0x49, 0x96, 0x1e, 0x9e, 0x64, 0xe9, 0xaf, 0x27, 0x59, 0xfa, + 0xf1, 0x59, 0x4e, 0x3d, 0x3c, 0xcb, 0xa9, 0xdf, 0x9f, 0xe5, 0xd4, 0xb7, 0x3b, 0xaf, 0x36, 0xd9, + 0x88, 0x0e, 0x75, 0x13, 0xfb, 0x57, 0x94, 0x5d, 0x68, 0xc9, 0xb7, 0x74, 0x98, 0x7c, 0x4d, 0xc5, + 0x96, 0x3b, 0x39, 0x31, 0xff, 0xca, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x24, 0x82, 0xa5, 0x31, + 0xd0, 0x05, 0x00, 0x00, } func (m *Params) Marshal() (dAtA []byte, err error) { @@ -498,16 +495,6 @@ func (m *SlashingParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintParams(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x32 - { - size := m.SlashFractionMiss.Size() - i -= size - if _, err := m.SlashFractionMiss.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x2a n2, err2 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.OracleMaliciousJailDuration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.OracleMaliciousJailDuration):]) if err2 != nil { @@ -630,8 +617,6 @@ func (m *SlashingParams) Size() (n int) { n += 1 + l + sovParams(uint64(l)) l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.OracleMaliciousJailDuration) n += 1 + l + sovParams(uint64(l)) - l = m.SlashFractionMiss.Size() - n += 1 + l + sovParams(uint64(l)) l = m.SlashFractionMalicious.Size() n += 1 + l + sovParams(uint64(l)) return n @@ -1161,39 +1146,6 @@ func (m *SlashingParams) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SlashFractionMiss", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SlashFractionMiss.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SlashFractionMalicious", wireType) } diff --git a/x/oracle/types/query.pb.go b/x/oracle/types/query.pb.go index 8d75f49fe..b3c235aa6 100644 --- a/x/oracle/types/query.pb.go +++ b/x/oracle/types/query.pb.go @@ -216,8 +216,10 @@ func (m *QueryStakerListRequest) GetAssetId() string { // QueryStakerListResponse is response type for Query/StakerList RPC method type QueryStakerListResponse struct { + // version of the staker validator list changes + Version int64 
`protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // staker list including all stakers of request asset - StakerList *StakerList `protobuf:"bytes,1,opt,name=staker_list,json=stakerList,proto3" json:"staker_list,omitempty"` + StakerList *StakerList `protobuf:"bytes,2,opt,name=staker_list,json=stakerList,proto3" json:"staker_list,omitempty"` } func (m *QueryStakerListResponse) Reset() { *m = QueryStakerListResponse{} } @@ -253,6 +255,13 @@ func (m *QueryStakerListResponse) XXX_DiscardUnknown() { var xxx_messageInfo_QueryStakerListResponse proto.InternalMessageInfo +func (m *QueryStakerListResponse) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + func (m *QueryStakerListResponse) GetStakerList() *StakerList { if m != nil { return m.StakerList @@ -317,8 +326,10 @@ func (m *QueryStakerInfoRequest) GetStakerAddr() string { // QueryStakerInfoResponse is response type for Query/StakerInfo RCP method type QueryStakerInfoResponse struct { + // version of the staker validator list changes + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // all staker infos under the specified asset - StakerInfo *StakerInfo `protobuf:"bytes,1,opt,name=staker_info,json=stakerInfo,proto3" json:"staker_info,omitempty"` + StakerInfo *StakerInfo `protobuf:"bytes,2,opt,name=staker_info,json=stakerInfo,proto3" json:"staker_info,omitempty"` } func (m *QueryStakerInfoResponse) Reset() { *m = QueryStakerInfoResponse{} } @@ -354,6 +365,13 @@ func (m *QueryStakerInfoResponse) XXX_DiscardUnknown() { var xxx_messageInfo_QueryStakerInfoResponse proto.InternalMessageInfo +func (m *QueryStakerInfoResponse) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + func (m *QueryStakerInfoResponse) GetStakerInfo() *StakerInfo { if m != nil { return m.StakerInfo @@ -418,10 +436,12 @@ func (m *QueryStakerInfosRequest) GetPagination() *query.PageRequest { // QueryStakerInfosResponse is response type for Query/StakerInfo RCP method type QueryStakerInfosResponse struct { + // version of the staker validator list changes + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // all staker infos under the specified asset - StakerInfos []*StakerInfo `protobuf:"bytes,1,rep,name=staker_infos,json=stakerInfos,proto3" json:"staker_infos,omitempty"` + StakerInfos []*StakerInfo `protobuf:"bytes,2,rep,name=staker_infos,json=stakerInfos,proto3" json:"staker_infos,omitempty"` // pagination defines the pagination in the response. 
- Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` + Pagination *query.PageResponse `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` } func (m *QueryStakerInfosResponse) Reset() { *m = QueryStakerInfosResponse{} } @@ -457,6 +477,13 @@ func (m *QueryStakerInfosResponse) XXX_DiscardUnknown() { var xxx_messageInfo_QueryStakerInfosResponse proto.InternalMessageInfo +func (m *QueryStakerInfosResponse) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + func (m *QueryStakerInfosResponse) GetStakerInfos() []*StakerInfo { if m != nil { return m.StakerInfos @@ -1511,94 +1538,95 @@ func init() { func init() { proto.RegisterFile("exocore/oracle/v1/query.proto", fileDescriptor_b8cba1249806967d) } var fileDescriptor_b8cba1249806967d = []byte{ - // 1380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x98, 0x5f, 0x6f, 0xdb, 0xd4, - 0x1b, 0xc7, 0xeb, 0xb5, 0xeb, 0x7e, 0x7d, 0xd2, 0xed, 0x47, 0xcf, 0xca, 0x68, 0xbd, 0x35, 0x4d, - 0xbd, 0xa6, 0x4d, 0x9b, 0xce, 0xa7, 0x69, 0x11, 0x85, 0x0b, 0xfe, 0xb4, 0x12, 0x54, 0x9d, 0xc6, - 0xd4, 0x86, 0x82, 0xc4, 0x84, 0x88, 0x9c, 0xe4, 0x2c, 0x98, 0xba, 0x71, 0x66, 0xbb, 0xa1, 0xa3, - 0x54, 0x48, 0xdc, 0xee, 0x66, 0xd2, 0xc4, 0xcd, 0x04, 0x12, 0x20, 0x10, 0x77, 0xdc, 0x70, 0xc7, - 0x2b, 0xd8, 0xe5, 0x24, 0x6e, 0xb8, 0x42, 0xa8, 0xe5, 0x85, 0x20, 0x1f, 0x3f, 0x4e, 0xec, 0xf8, - 0xd8, 0x71, 0xa7, 0xde, 0xc5, 0xf6, 0xf3, 0xe7, 0xf3, 0x3c, 0xe7, 0x39, 0xc7, 0xdf, 0x18, 0xa6, - 0xd8, 0xa1, 0x59, 0x33, 0x2d, 0x46, 0x4d, 0x4b, 0xab, 0x19, 0x8c, 0xb6, 0x4b, 0xf4, 0xc1, 0x01, - 0xb3, 0x1e, 0xaa, 0x2d, 0xcb, 0x74, 0x4c, 0x32, 0x86, 0x8f, 0x55, 0xef, 0xb1, 0xda, 0x2e, 0xc9, - 0x8b, 0x35, 0xd3, 0xde, 0x37, 0x6d, 0x5a, 0xd5, 0x6c, 0xe6, 0xd9, 0xd2, 0x76, 0xa9, 0xca, 0x1c, - 0xad, 0x44, 0x5b, 0x5a, 0x43, 0x6f, 0x6a, 0x8e, 0x6e, 0x36, 0x3d, 0x77, 0xb9, 0x10, 0x8d, 0xae, - 0x37, 0xeb, 0xec, 0xb0, 0x62, 0xb1, 0x1a, 0x6b, 0x3a, 0x95, 0x7d, 0xbb, 0x81, 0x96, 0xc5, 0x3e, - 0x96, 0x2d, 0xcd, 0xd2, 0xf6, 0x6d, 0x34, 0x9e, 0x8d, 0x1a, 0xbb, 0x69, 0xdb, 0xac, 0xe2, 0x98, - 0x7b, 0xcc, 0x4f, 0x9e, 0x8d, 0x5a, 0x85, 0xa2, 0x08, 0x4a, 0x6f, 0x59, 0x7a, 0x8d, 0x25, 0xb8, - 0xbb, 0x8f, 0x7d, 0x77, 0x25, 0xfa, 0x3c, 0x52, 0x55, 0x3e, 0xd6, 0x26, 0x44, 0xa2, 0x46, 0xcd, - 0xda, 0x9a, 0xa1, 0xd7, 0x35, 0xc7, 0xb4, 0x2a, 0x07, 0xad, 0xba, 0xe6, 0xb0, 0x4a, 0xd5, 0x30, - 0x6b, 0x7b, 0x68, 0x3f, 0xde, 0x30, 0x1b, 0x26, 0xff, 0x49, 0xdd, 0x5f, 0x78, 0xf7, 0x46, 0xc3, - 0x34, 0x1b, 0x06, 0xa3, 0x5a, 0x4b, 0xa7, 0x5a, 0xb3, 0x69, 0x3a, 0x7c, 0x25, 0x30, 0x87, 0xf2, - 0x3a, 0xc0, 0xae, 0xdb, 0x9c, 0x2d, 0xb7, 0xab, 0x64, 0x1c, 0x2e, 0xf2, 0x56, 0x4d, 0x48, 0x39, - 0xa9, 0x30, 0x52, 0xf6, 0x2e, 0xdc, 0xbb, 0xbc, 0xe9, 0x13, 0x17, 0x72, 0x52, 0x61, 0xa8, 0xec, - 0x5d, 0x28, 0x32, 0x4c, 0xec, 0xb8, 0xcb, 0xdc, 0x75, 0x67, 0x76, 0x99, 0x3d, 0x38, 0x60, 0xb6, - 0xa3, 0x54, 0x60, 0x52, 0xf0, 0xcc, 0x6e, 0x99, 0x4d, 0x9b, 0x91, 0x0d, 0xb8, 0xcc, 0xe3, 0x56, - 0x74, 0xef, 0xc1, 0x84, 0x94, 0x1b, 0x2c, 0x64, 0x56, 0xa6, 0xd4, 0xc8, 0x50, 0xa9, 0x5d, 0xff, - 0xf2, 0xa8, 0x13, 0x88, 0xa5, 0xac, 0xc2, 0x35, 0x9e, 0xe0, 0x03, 0x47, 0xdb, 0x63, 0xd6, 0x1d, - 0xdd, 0x76, 0x30, 0x35, 0x99, 0x84, 0xff, 0x69, 0xb6, 0xcd, 0x9c, 0x8a, 0x5e, 0xc7, 0x2a, 0x2e, - 0xf1, 0xeb, 0xad, 0xba, 0xf2, 0x31, 0xbc, 0x12, 0x71, 0x42, 0xa6, 0xb7, 0x20, 0x63, 0xf3, 0xbb, - 0x15, 0x43, 0xb7, 0x1d, 0xee, 0x28, 0x26, 0x0a, 0xf8, 0x82, 0xdd, 0xf9, 0xad, 0xec, 0x86, 0x78, - 0xb6, 
0x9a, 0xf7, 0xcd, 0xfe, 0x3c, 0x64, 0xba, 0x93, 0x54, 0xab, 0xd7, 0x2d, 0xde, 0xdd, 0x11, - 0x3f, 0xea, 0x7a, 0xbd, 0x6e, 0xf5, 0x00, 0x7b, 0x51, 0x23, 0xc0, 0x7a, 0xf3, 0xbe, 0xd9, 0x17, - 0x98, 0xfb, 0x62, 0x68, 0xf7, 0xb7, 0xf2, 0x55, 0x24, 0xb4, 0x9d, 0x82, 0xf8, 0x3d, 0x80, 0xee, - 0x66, 0xe6, 0xc0, 0x99, 0x95, 0x39, 0xd5, 0xdb, 0xf9, 0xaa, 0xbb, 0xf3, 0x55, 0xef, 0x94, 0xc0, - 0x9d, 0xaf, 0x6e, 0x6b, 0x0d, 0x86, 0x61, 0xcb, 0x01, 0x4f, 0xe5, 0x17, 0x09, 0x87, 0x27, 0x94, - 0x1e, 0x4b, 0x7b, 0x07, 0x46, 0x03, 0xa5, 0x25, 0x8d, 0x47, 0xa0, 0xb6, 0x4c, 0xb7, 0x36, 0x9b, - 0x6c, 0x0a, 0x30, 0xe7, 0xfb, 0x62, 0x7a, 0xe9, 0x43, 0x9c, 0xe3, 0x40, 0x38, 0xe6, 0x36, 0xdf, - 0x96, 0xfe, 0x74, 0xdf, 0x85, 0xab, 0xa1, 0xbb, 0xc8, 0xbd, 0x06, 0xc3, 0xde, 0xf6, 0xc5, 0xd5, - 0x98, 0x14, 0x10, 0x7b, 0x2e, 0x1b, 0x43, 0xcf, 0xfe, 0x9e, 0x1e, 0x28, 0xa3, 0xb9, 0xb2, 0x02, - 0x2f, 0xf3, 0x78, 0x9b, 0xcc, 0xd9, 0xe6, 0x47, 0x49, 0x60, 0x25, 0x70, 0xa7, 0x78, 0x2b, 0x31, - 0x54, 0xbe, 0xe4, 0xed, 0x82, 0xba, 0xb2, 0x06, 0xb2, 0xef, 0x73, 0x47, 0x73, 0x98, 0xed, 0x79, - 0xa6, 0x70, 0xdc, 0xc1, 0x49, 0x0d, 0x24, 0x0b, 0xf0, 0xf3, 0x3b, 0x49, 0xfc, 0xdc, 0xa0, 0xc3, - 0xcf, 0xaf, 0x94, 0x4f, 0xe0, 0xba, 0x90, 0x05, 0xe3, 0xbe, 0x09, 0x17, 0xb9, 0x21, 0x86, 0x9d, - 0x89, 0x0b, 0xbb, 0xab, 0xef, 0xb3, 0xb2, 0x79, 0xd0, 0xac, 0x63, 0x78, 0xcf, 0x4b, 0xa9, 0x60, - 0x77, 0xd6, 0x0d, 0x23, 0xdc, 0x9d, 0xf0, 0x30, 0x4a, 0x2f, 0x3c, 0x8c, 0x4f, 0x25, 0x6c, 0x49, - 0x20, 0x83, 0xa0, 0x25, 0x83, 0x67, 0x68, 0xc9, 0xf9, 0x4d, 0x60, 0x1e, 0x6e, 0xfa, 0xbd, 0xfd, - 0xc8, 0x3f, 0xfb, 0x3f, 0xe4, 0x47, 0xff, 0x86, 0x7b, 0xf2, 0xfb, 0x23, 0xf9, 0x48, 0x82, 0xd9, - 0x64, 0x3b, 0xac, 0xa8, 0x06, 0xd7, 0xc4, 0xef, 0x10, 0x6c, 0xe0, 0xbc, 0xa0, 0x42, 0x51, 0x40, - 0xac, 0x77, 0xbc, 0x2d, 0x78, 0xa6, 0x28, 0x90, 0xf3, 0x61, 0xbc, 0xc3, 0x9b, 0xbf, 0xdc, 0xc2, - 0x9b, 0xe8, 0x6b, 0x98, 0x49, 0xb0, 0x41, 0xda, 0x7b, 0x70, 0x55, 0xf0, 0xba, 0x47, 0xd4, 0x59, - 0x01, 0x6a, 0x24, 0x14, 0x72, 0x8e, 0xe9, 0xbd, 0x0f, 0x94, 0x69, 0x98, 0x12, 0x00, 0xbc, 0x6f, - 0x37, 0x7c, 0x42, 0x1b, 0xb2, 0x71, 0x06, 0x88, 0xb7, 0x03, 0x2f, 0xf5, 0xea, 0x96, 0x84, 0x21, - 0x0f, 0x07, 0x41, 0xb0, 0x2b, 0x7a, 0xe8, 0xae, 0xb2, 0x8c, 0x07, 0xe3, 0x26, 0x73, 0x7a, 0x81, - 0xdc, 0xf7, 0x70, 0x77, 0xa9, 0x86, 0xca, 0xde, 0x85, 0xf2, 0x29, 0xbe, 0x6b, 0xc3, 0x1e, 0x48, - 0xb8, 0x0e, 0x10, 0x61, 0xbb, 0x21, 0x60, 0xeb, 0xc5, 0x1a, 0xb1, 0x3a, 0x44, 0x55, 0x24, 0x5a, - 0x37, 0x8c, 0x08, 0xd1, 0x79, 0x6d, 0xc1, 0x5f, 0x25, 0x2c, 0x22, 0x9c, 0x24, 0xa6, 0x88, 0xc1, - 0x33, 0x17, 0x71, 0x7e, 0xfb, 0x71, 0xb5, 0x7b, 0xd6, 0x09, 0xa6, 0x3a, 0x66, 0x89, 0x3e, 0x87, - 0x1b, 0x62, 0x27, 0x2c, 0xf0, 0x36, 0x5c, 0x16, 0x0d, 0xf8, 0x74, 0x6c, 0x8d, 0xa1, 0xd9, 0x1e, - 0xb5, 0x82, 0x63, 0xcd, 0x10, 0xb0, 0xd3, 0xc9, 0x30, 0xe0, 0x79, 0xad, 0xd8, 0xef, 0x12, 0xd6, - 0x14, 0xc9, 0x13, 0x5f, 0xd3, 0xe0, 0x0b, 0xd6, 0x74, 0x6e, 0xab, 0xb7, 0xf2, 0xfd, 0x18, 0x5c, - 0xe4, 0xd4, 0xe4, 0x89, 0x04, 0xa3, 0x41, 0x75, 0x4a, 0x8a, 0x02, 0xb0, 0x38, 0x7d, 0x2b, 0x2f, - 0xa5, 0x33, 0xf6, 0x08, 0x94, 0xc2, 0x37, 0x7f, 0xfe, 0xfb, 0xe4, 0x82, 0x42, 0x72, 0x34, 0x2a, - 0xe8, 0x43, 0x4a, 0x98, 0x3c, 0x92, 0x00, 0xba, 0x0a, 0x93, 0x2c, 0xc4, 0xa5, 0x89, 0xc8, 0x5e, - 0x79, 0x31, 0x8d, 0x29, 0xf2, 0xcc, 0x71, 0x9e, 0x1c, 0xc9, 0x0a, 0x78, 0x02, 0x2a, 0x98, 0x3c, - 0x95, 0x20, 0x13, 0x10, 0x68, 0xa4, 0x4f, 0x8e, 0xa0, 0x88, 0x94, 0x8b, 0xa9, 0x6c, 0x11, 0x68, - 0x85, 0x03, 0x2d, 0x91, 0xc5, 0x78, 0x20, 0x2e, 0x05, 0xe9, 0x91, 0x2f, 0x4c, 0x8f, 0xc9, 0xcf, - 0x9d, 0x56, 0xb9, 0xb1, 0xfa, 
0xb5, 0x2a, 0xa0, 0xc8, 0xe5, 0xc5, 0x34, 0xa6, 0x48, 0xf6, 0x36, - 0x27, 0x7b, 0x83, 0xac, 0x25, 0x93, 0x05, 0xc0, 0xe8, 0x51, 0x40, 0xd3, 0x1f, 0x93, 0x2f, 0x61, - 0x18, 0x87, 0x38, 0x1f, 0x97, 0x36, 0xb4, 0x41, 0xe5, 0xb9, 0x7e, 0x66, 0x48, 0x36, 0xc3, 0xc9, - 0xae, 0x93, 0x49, 0x1a, 0xf7, 0x7f, 0xd6, 0x9d, 0xa6, 0x61, 0x4f, 0x9d, 0x90, 0x42, 0x5c, 0xd4, - 0x5e, 0xcd, 0x29, 0x2f, 0xa4, 0xb0, 0x44, 0x84, 0x25, 0x8e, 0x30, 0x47, 0x66, 0x69, 0xdc, 0x7f, - 0x62, 0x7a, 0xe4, 0xcb, 0xd0, 0x63, 0xf2, 0x83, 0x04, 0x99, 0x80, 0x3c, 0x24, 0xb7, 0x12, 0x12, - 0x45, 0x25, 0xad, 0xac, 0xa6, 0x35, 0x4f, 0x31, 0x53, 0x06, 0xb7, 0xaf, 0x70, 0xc6, 0x20, 0xe2, - 0x1f, 0x12, 0x8c, 0x8b, 0xc4, 0x0e, 0x79, 0x2d, 0x21, 0x79, 0x82, 0x2c, 0x93, 0xd7, 0xce, 0xec, - 0x87, 0xf4, 0x25, 0x4e, 0x5f, 0x24, 0x0b, 0x34, 0xed, 0x37, 0x00, 0xf2, 0x9b, 0x04, 0x63, 0x11, - 0xf9, 0x43, 0x56, 0x13, 0x08, 0xe2, 0xb4, 0x99, 0xfc, 0xea, 0xd9, 0x9c, 0x90, 0x59, 0xe5, 0xcc, - 0x05, 0x32, 0x47, 0x53, 0x7d, 0xb4, 0x21, 0x3f, 0x49, 0x70, 0x25, 0xac, 0x89, 0xc8, 0x72, 0xba, - 0xc4, 0x5d, 0x05, 0x22, 0x97, 0xce, 0xe0, 0x81, 0x9c, 0x45, 0xce, 0x99, 0x27, 0x37, 0x69, 0xff, - 0xcf, 0x50, 0xe4, 0x5b, 0x09, 0x46, 0xba, 0x7c, 0xc5, 0x84, 0x6c, 0x11, 0xb4, 0xa5, 0x74, 0xc6, - 0x48, 0x75, 0x8b, 0x53, 0xcd, 0x93, 0x3c, 0x4d, 0xfa, 0x80, 0x44, 0x8f, 0xf8, 0x62, 0x1f, 0x93, - 0xc7, 0x12, 0x8c, 0x76, 0x82, 0xac, 0x1b, 0x46, 0x3c, 0x9a, 0x40, 0xb7, 0xc5, 0xa3, 0x89, 0xf4, - 0x97, 0x92, 0xe7, 0x68, 0xd3, 0x64, 0x2a, 0x11, 0x8d, 0xfc, 0xd8, 0x41, 0xc2, 0xd9, 0x53, 0xfb, - 0x36, 0x20, 0x3c, 0x76, 0x34, 0xb5, 0x3d, 0x82, 0x2d, 0x73, 0xb0, 0x45, 0x52, 0xa0, 0x7d, 0x3e, - 0xa8, 0x75, 0xda, 0xf6, 0x9d, 0x04, 0xff, 0x0f, 0x86, 0x72, 0x3b, 0xa7, 0xf6, 0x6d, 0x46, 0x4a, - 0xcc, 0x18, 0x29, 0x94, 0xf8, 0xfe, 0x0f, 0x61, 0x6e, 0xdc, 0x7e, 0x76, 0x92, 0x95, 0x9e, 0x9f, - 0x64, 0xa5, 0x7f, 0x4e, 0xb2, 0xd2, 0xe3, 0xd3, 0xec, 0xc0, 0xf3, 0xd3, 0xec, 0xc0, 0x5f, 0xa7, - 0xd9, 0x81, 0x7b, 0xcb, 0x0d, 0xdd, 0xf9, 0xec, 0xa0, 0xaa, 0xd6, 0xcc, 0x7d, 0xfa, 0xae, 0x17, - 0xe5, 0x2e, 0x73, 0xbe, 0x30, 0xad, 0xbd, 0x4e, 0xd0, 0x43, 0x3f, 0xac, 0xf3, 0xb0, 0xc5, 0xec, - 0xea, 0x30, 0xff, 0xc0, 0xb7, 0xfa, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0f, 0x2e, 0xfe, 0x2d, - 0xcb, 0x15, 0x00, 0x00, + // 1404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x98, 0xcf, 0x6f, 0xdb, 0x64, + 0x18, 0xc7, 0xeb, 0xa6, 0xeb, 0xe8, 0x93, 0x6e, 0xd0, 0x77, 0x65, 0xa4, 0xde, 0x9a, 0x66, 0x5e, + 0xd3, 0x65, 0x4d, 0x67, 0x37, 0x2d, 0xa2, 0x70, 0xe0, 0x47, 0x2b, 0xc1, 0xd4, 0x69, 0x4c, 0x6d, + 0x28, 0x1c, 0x26, 0x44, 0xe4, 0x24, 0xef, 0x82, 0xa9, 0x1b, 0x67, 0x7e, 0xdd, 0xd0, 0x51, 0x2a, + 0x24, 0xae, 0xbb, 0x4c, 0x9a, 0xb8, 0x4c, 0x20, 0x01, 0x42, 0xe2, 0xc6, 0x85, 0x1b, 0x17, 0xae, + 0x3b, 0x4e, 0xe2, 0xc2, 0x09, 0xa1, 0x96, 0x3f, 0x04, 0xf9, 0xf5, 0xe3, 0xc4, 0x8e, 0x7f, 0xa6, + 0xea, 0x2d, 0xb6, 0x9f, 0x1f, 0x9f, 0xef, 0xf3, 0x3e, 0x7e, 0xdf, 0x27, 0x86, 0x59, 0x7a, 0x60, + 0x34, 0x0c, 0x93, 0x2a, 0x86, 0xa9, 0x36, 0x74, 0xaa, 0x74, 0x2b, 0xca, 0xc3, 0x7d, 0x6a, 0x3e, + 0x92, 0x3b, 0xa6, 0x61, 0x19, 0x64, 0x0a, 0x1f, 0xcb, 0xce, 0x63, 0xb9, 0x5b, 0x11, 0x17, 0x1b, + 0x06, 0xdb, 0x33, 0x98, 0x52, 0x57, 0x19, 0x75, 0x6c, 0x95, 0x6e, 0xa5, 0x4e, 0x2d, 0xb5, 0xa2, + 0x74, 0xd4, 0x96, 0xd6, 0x56, 0x2d, 0xcd, 0x68, 0x3b, 0xee, 0x62, 0x29, 0x18, 0x5d, 0x6b, 0x37, + 0xe9, 0x41, 0xcd, 0xa4, 0x0d, 0xda, 0xb6, 0x6a, 0x7b, 0xac, 0x85, 0x96, 0xe5, 0x04, 0xcb, 0x8e, + 0x6a, 0xaa, 0x7b, 0x0c, 0x8d, 0xe7, 0x83, 0xc6, 0x76, 0xda, 0x2e, 0xad, 0x59, 
0xc6, 0x2e, 0x75, + 0x93, 0xe7, 0x83, 0x56, 0xbe, 0x28, 0x21, 0xd2, 0x3b, 0xa6, 0xd6, 0xa0, 0x31, 0xee, 0xf6, 0x63, + 0xd7, 0x5d, 0x0a, 0x3e, 0x0f, 0xa8, 0x2a, 0x46, 0xda, 0xf8, 0x48, 0xe4, 0xa0, 0x59, 0x57, 0xd5, + 0xb5, 0xa6, 0x6a, 0x19, 0x66, 0x6d, 0xbf, 0xd3, 0x54, 0x2d, 0x5a, 0xab, 0xeb, 0x46, 0x63, 0x17, + 0xed, 0xa7, 0x5b, 0x46, 0xcb, 0xe0, 0x3f, 0x15, 0xfb, 0x17, 0xde, 0xbd, 0xda, 0x32, 0x8c, 0x96, + 0x4e, 0x15, 0xb5, 0xa3, 0x29, 0x6a, 0xbb, 0x6d, 0x58, 0x7c, 0x25, 0x30, 0x87, 0xf4, 0x26, 0xc0, + 0x8e, 0x5d, 0x9c, 0x4d, 0xbb, 0xaa, 0x64, 0x1a, 0xce, 0xf1, 0x52, 0xe5, 0x84, 0x82, 0x50, 0x9a, + 0xa8, 0x3a, 0x17, 0xf6, 0x5d, 0x5e, 0xf4, 0xdc, 0x68, 0x41, 0x28, 0x8d, 0x55, 0x9d, 0x0b, 0x49, + 0x84, 0xdc, 0xb6, 0xbd, 0xcc, 0x7d, 0x77, 0xca, 0xaa, 0xf4, 0xe1, 0x3e, 0x65, 0x96, 0x54, 0x83, + 0x99, 0x90, 0x67, 0xac, 0x63, 0xb4, 0x19, 0x25, 0x1b, 0x70, 0x81, 0xc7, 0xad, 0x69, 0xce, 0x83, + 0x9c, 0x50, 0xc8, 0x94, 0xb2, 0x2b, 0xb3, 0x72, 0xa0, 0xa9, 0xe4, 0xbe, 0x7f, 0x75, 0xd2, 0xf2, + 0xc4, 0x92, 0x56, 0xe1, 0x32, 0x4f, 0xf0, 0x91, 0xa5, 0xee, 0x52, 0xf3, 0xae, 0xc6, 0x2c, 0x4c, + 0x4d, 0x66, 0xe0, 0x25, 0x95, 0x31, 0x6a, 0xd5, 0xb4, 0x26, 0xaa, 0x38, 0xcf, 0xaf, 0x37, 0x9b, + 0x12, 0x83, 0xd7, 0x02, 0x4e, 0xc8, 0x94, 0x83, 0xf3, 0x5d, 0x6a, 0x32, 0xcd, 0x70, 0xa4, 0x67, + 0xaa, 0xee, 0x25, 0x79, 0x07, 0xb2, 0x8c, 0xdb, 0xd7, 0x74, 0x8d, 0x59, 0xbc, 0x04, 0xe1, 0xac, + 0x9e, 0xa8, 0xc0, 0x7a, 0xbf, 0xa5, 0x1d, 0x1f, 0xe9, 0x66, 0xfb, 0x81, 0x91, 0x4c, 0x4a, 0xe6, + 0x7a, 0x49, 0xd5, 0x66, 0xd3, 0xe4, 0x49, 0x27, 0xdc, 0xa8, 0xeb, 0xcd, 0xa6, 0x39, 0x20, 0xc5, + 0x89, 0x3a, 0x84, 0x14, 0xad, 0xfd, 0xc0, 0x48, 0x94, 0xc2, 0xa3, 0x62, 0x52, 0xfb, 0xb7, 0xf4, + 0x75, 0x20, 0x29, 0x4b, 0xa1, 0xe5, 0x03, 0x80, 0xfe, 0x06, 0x80, 0x49, 0x17, 0x64, 0x67, 0xb7, + 0x90, 0xed, 0xdd, 0x42, 0x76, 0x76, 0x16, 0xdc, 0x2d, 0xe4, 0x2d, 0xb5, 0x45, 0x31, 0x6c, 0xd5, + 0xe3, 0x29, 0xfd, 0x29, 0x60, 0xc3, 0xf9, 0xd2, 0x27, 0x8a, 0x7e, 0x0f, 0x26, 0x3d, 0xa2, 0x59, + 0x6e, 0x34, 0xb2, 0xd9, 0x3c, 0xaa, 0xb3, 0x7d, 0xd5, 0x8c, 0xdc, 0xf6, 0x09, 0xc8, 0x70, 0x01, + 0x37, 0x12, 0x05, 0x38, 0x60, 0x3e, 0x05, 0xd3, 0x40, 0xb8, 0x80, 0x2d, 0xfe, 0x92, 0xbb, 0xef, + 0xca, 0x3d, 0xb8, 0xe4, 0xbb, 0x8b, 0x8a, 0xd6, 0x60, 0xdc, 0xd9, 0x0c, 0xb8, 0xa0, 0xec, 0xca, + 0x4c, 0x08, 0xb1, 0xe3, 0xb2, 0x31, 0xf6, 0xfc, 0x9f, 0xb9, 0x91, 0x2a, 0x9a, 0x4b, 0x2b, 0xf0, + 0x2a, 0x8f, 0x77, 0x9b, 0x5a, 0x5b, 0x7c, 0x63, 0xf2, 0xac, 0x11, 0xbe, 0x77, 0xce, 0x1a, 0x8d, + 0x55, 0xcf, 0x3b, 0xef, 0x54, 0x53, 0x5a, 0x03, 0xd1, 0xf5, 0xb9, 0xab, 0x5a, 0x94, 0x39, 0x9e, + 0x29, 0x1c, 0xb7, 0xb1, 0xbb, 0x3d, 0xc9, 0x3c, 0xfc, 0xfc, 0x4e, 0x1c, 0x3f, 0x37, 0xe8, 0xf1, + 0xf3, 0x2b, 0xe9, 0x53, 0xb8, 0x12, 0xca, 0x82, 0x71, 0xdf, 0x86, 0x73, 0xdc, 0x10, 0xc3, 0x5e, + 0x8b, 0x0a, 0xbb, 0xa3, 0xed, 0xd1, 0xaa, 0xb1, 0xdf, 0x6e, 0x62, 0x78, 0xc7, 0x4b, 0xaa, 0x61, + 0x75, 0xd6, 0x75, 0xdd, 0x5f, 0x1d, 0x7f, 0x9b, 0x0a, 0xa7, 0x6e, 0xd3, 0x67, 0x02, 0x96, 0xc4, + 0x93, 0x21, 0xa4, 0x24, 0x99, 0x21, 0x4a, 0x32, 0xd0, 0x81, 0xa3, 0xa7, 0xef, 0xc0, 0x22, 0x5c, + 0x77, 0x6b, 0xfb, 0x89, 0x7b, 0x92, 0x7c, 0xcc, 0x0f, 0x92, 0x0d, 0xfb, 0x1c, 0x71, 0x5b, 0xf2, + 0xb1, 0x00, 0xf3, 0xf1, 0x76, 0xa8, 0xa8, 0x01, 0x97, 0xc3, 0x4f, 0x24, 0x2c, 0xe0, 0x8d, 0x10, + 0x85, 0x61, 0x01, 0x51, 0xef, 0x74, 0x37, 0xe4, 0x99, 0x24, 0x41, 0xc1, 0x85, 0x71, 0x8e, 0x02, + 0x7e, 0x54, 0xfa, 0x5f, 0xa2, 0x6f, 0xe0, 0x5a, 0x8c, 0x0d, 0xd2, 0xde, 0x87, 0x4b, 0x21, 0xc3, + 0x03, 0xa2, 0xce, 0x87, 0xa0, 0x06, 0x42, 0x21, 0xe7, 0x94, 0x36, 0xf8, 0x40, 0x9a, 0x83, 0xd9, + 0x10, 
0x80, 0x0f, 0x59, 0xcb, 0x25, 0x64, 0x90, 0x8f, 0x32, 0x40, 0xbc, 0x6d, 0x78, 0x65, 0x70, + 0x0a, 0x8a, 0x69, 0x72, 0x7f, 0x10, 0x04, 0xbb, 0xa8, 0xf9, 0xee, 0x4a, 0xcb, 0xb8, 0x65, 0xde, + 0xa6, 0xd6, 0x20, 0x90, 0x7d, 0xaa, 0xf7, 0x97, 0x6a, 0xac, 0xea, 0x5c, 0x48, 0x9f, 0xe1, 0xc9, + 0xed, 0xf7, 0x40, 0xc2, 0x75, 0x80, 0x00, 0xdb, 0xd5, 0x10, 0xb6, 0x41, 0xac, 0x09, 0xb3, 0x47, + 0x54, 0x47, 0xa2, 0x75, 0x5d, 0x0f, 0x10, 0x9d, 0xd5, 0x2b, 0xf8, 0xab, 0x80, 0x22, 0xfc, 0x49, + 0x22, 0x44, 0x64, 0x86, 0x16, 0x71, 0x76, 0xef, 0xe3, 0x6a, 0x7f, 0xaf, 0x0b, 0xe9, 0xea, 0x88, + 0x25, 0xfa, 0x02, 0xae, 0x86, 0x3b, 0xa1, 0xc0, 0x3b, 0x70, 0x21, 0xac, 0xc1, 0xe7, 0x22, 0x35, + 0xfa, 0x7a, 0x7b, 0xd2, 0xf4, 0xb6, 0x35, 0x45, 0xc0, 0x5e, 0x25, 0xfd, 0x80, 0x67, 0xb5, 0x62, + 0xbf, 0x0b, 0xa8, 0x29, 0x90, 0x27, 0x5a, 0x53, 0xe6, 0x94, 0x9a, 0xce, 0x6c, 0xf5, 0x56, 0x7e, + 0x98, 0x82, 0x73, 0x9c, 0x9a, 0x3c, 0x15, 0x60, 0xd2, 0x3b, 0xeb, 0x92, 0x72, 0x08, 0x58, 0xd4, + 0xb4, 0x2c, 0x2e, 0xa5, 0x33, 0x76, 0x08, 0xa4, 0xd2, 0xb7, 0x7f, 0xfd, 0xf7, 0x74, 0x54, 0x22, + 0x05, 0x25, 0xf8, 0xf7, 0xc0, 0x37, 0x57, 0x93, 0xc7, 0x02, 0x40, 0x7f, 0x2a, 0x25, 0x37, 0xa3, + 0xd2, 0x04, 0x86, 0x68, 0x71, 0x31, 0x8d, 0x29, 0xf2, 0x2c, 0x70, 0x9e, 0x02, 0xc9, 0x87, 0xf0, + 0x78, 0x26, 0x67, 0xf2, 0x4c, 0x80, 0xac, 0x67, 0x74, 0x23, 0x09, 0x39, 0xbc, 0xe3, 0xa5, 0x58, + 0x4e, 0x65, 0x8b, 0x40, 0x2b, 0x1c, 0x68, 0x89, 0x2c, 0x46, 0x03, 0xf1, 0x51, 0x50, 0x39, 0x74, + 0x47, 0xd6, 0x23, 0xf2, 0x4b, 0xaf, 0x54, 0x76, 0xac, 0xa4, 0x52, 0x79, 0xa6, 0x78, 0x71, 0x31, + 0x8d, 0x29, 0x92, 0xbd, 0xcb, 0xc9, 0xde, 0x22, 0x6b, 0xf1, 0x64, 0x1e, 0x30, 0xe5, 0xd0, 0xf3, + 0x3f, 0xe0, 0x88, 0x7c, 0x05, 0xe3, 0xd8, 0xc4, 0xc5, 0xa8, 0xb4, 0xbe, 0x17, 0x54, 0x5c, 0x48, + 0x32, 0x43, 0xb2, 0x6b, 0x9c, 0xec, 0x0a, 0x99, 0x51, 0xa2, 0xfe, 0x1d, 0xdb, 0xdd, 0x34, 0xee, + 0x4c, 0x27, 0xa4, 0x14, 0x15, 0x75, 0x70, 0xe6, 0x14, 0x6f, 0xa6, 0xb0, 0x44, 0x84, 0x25, 0x8e, + 0xb0, 0x40, 0xe6, 0x95, 0xa8, 0x7f, 0xd8, 0xca, 0xa1, 0x3b, 0x86, 0x1e, 0x91, 0x1f, 0x05, 0xc8, + 0x7a, 0xc6, 0x43, 0x72, 0x2b, 0x26, 0x51, 0x70, 0xa4, 0x15, 0xe5, 0xb4, 0xe6, 0x29, 0x7a, 0x4a, + 0xe7, 0xf6, 0x35, 0xce, 0xe8, 0x45, 0xfc, 0x43, 0x80, 0xe9, 0xb0, 0x61, 0x87, 0xbc, 0x11, 0x93, + 0x3c, 0x66, 0x2c, 0x13, 0xd7, 0x86, 0xf6, 0x43, 0xfa, 0x0a, 0xa7, 0x2f, 0x93, 0x9b, 0x4a, 0xda, + 0x2f, 0x0a, 0xe4, 0x37, 0x01, 0xa6, 0x02, 0xe3, 0x0f, 0x59, 0x8d, 0x21, 0x88, 0x9a, 0xcd, 0xc4, + 0xd7, 0x87, 0x73, 0x42, 0x66, 0x99, 0x33, 0x97, 0xc8, 0x82, 0x92, 0xea, 0x13, 0x10, 0xf9, 0x59, + 0x80, 0x8b, 0xfe, 0x99, 0x88, 0x2c, 0xa7, 0x4b, 0xdc, 0x9f, 0x40, 0xc4, 0xca, 0x10, 0x1e, 0xc8, + 0x59, 0xe6, 0x9c, 0x45, 0x72, 0x5d, 0x49, 0xfe, 0xa8, 0x45, 0xbe, 0x13, 0x60, 0xa2, 0xcf, 0x57, + 0x8e, 0xc9, 0x16, 0x40, 0x5b, 0x4a, 0x67, 0x8c, 0x54, 0xb7, 0x38, 0xd5, 0x0d, 0x52, 0x54, 0xe2, + 0x3e, 0x47, 0x29, 0x87, 0x7c, 0xb1, 0x8f, 0xc8, 0x13, 0x01, 0x26, 0x7b, 0x41, 0xd6, 0x75, 0x3d, + 0x1a, 0x2d, 0x64, 0x6e, 0x8b, 0x46, 0x0b, 0x9b, 0xbf, 0xa4, 0x22, 0x47, 0x9b, 0x23, 0xb3, 0xb1, + 0x68, 0xe4, 0xa7, 0x1e, 0x12, 0xf6, 0x9e, 0x9c, 0x58, 0x00, 0x7f, 0xdb, 0x29, 0xa9, 0xed, 0x11, + 0x6c, 0x99, 0x83, 0x2d, 0x92, 0x92, 0x92, 0xf0, 0x79, 0xae, 0x57, 0xb6, 0xef, 0x05, 0x78, 0xd9, + 0x1b, 0xca, 0xae, 0x9c, 0x9c, 0x58, 0x8c, 0x94, 0x98, 0x11, 0xa3, 0x50, 0xec, 0xf9, 0xef, 0xc3, + 0xdc, 0xb8, 0xf3, 0xfc, 0x38, 0x2f, 0xbc, 0x38, 0xce, 0x0b, 0xff, 0x1e, 0xe7, 0x85, 0x27, 0x27, + 0xf9, 0x91, 0x17, 0x27, 0xf9, 0x91, 0xbf, 0x4f, 0xf2, 0x23, 0xf7, 0x97, 0x5b, 0x9a, 0xf5, 0xf9, + 0x7e, 0x5d, 0x6e, 0x18, 0x7b, 
0xca, 0xfb, 0x4e, 0x94, 0x7b, 0xd4, 0xfa, 0xd2, 0x30, 0x77, 0x7b, + 0x41, 0x0f, 0xdc, 0xb0, 0xd6, 0xa3, 0x0e, 0x65, 0xf5, 0x71, 0xfe, 0xb9, 0x70, 0xf5, 0xff, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xd8, 0x38, 0xb6, 0xc4, 0x19, 0x16, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2332,7 +2360,12 @@ func (m *QueryStakerListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintQuery(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 + } + if m.Version != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -2404,7 +2437,12 @@ func (m *QueryStakerInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintQuery(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 + } + if m.Version != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -2481,7 +2519,7 @@ func (m *QueryStakerInfosResponse) MarshalToSizedBuffer(dAtA []byte) (int, error i = encodeVarintQuery(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } if len(m.StakerInfos) > 0 { for iNdEx := len(m.StakerInfos) - 1; iNdEx >= 0; iNdEx-- { @@ -2494,9 +2532,14 @@ func (m *QueryStakerInfosResponse) MarshalToSizedBuffer(dAtA []byte) (int, error i = encodeVarintQuery(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } } + if m.Version != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -3290,6 +3333,9 @@ func (m *QueryStakerListResponse) Size() (n int) { } var l int _ = l + if m.Version != 0 { + n += 1 + sovQuery(uint64(m.Version)) + } if m.StakerList != nil { l = m.StakerList.Size() n += 1 + l + sovQuery(uint64(l)) @@ -3320,6 +3366,9 @@ func (m *QueryStakerInfoResponse) Size() (n int) { } var l int _ = l + if m.Version != 0 { + n += 1 + sovQuery(uint64(m.Version)) + } if m.StakerInfo != nil { l = m.StakerInfo.Size() n += 1 + l + sovQuery(uint64(l)) @@ -3350,6 +3399,9 @@ func (m *QueryStakerInfosResponse) Size() (n int) { } var l int _ = l + if m.Version != 0 { + n += 1 + sovQuery(uint64(m.Version)) + } if len(m.StakerInfos) > 0 { for _, e := range m.StakerInfos { l = e.Size() @@ -3984,6 +4036,25 @@ func (m *QueryStakerListResponse) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StakerList", wireType) } @@ -4184,6 +4255,25 @@ func (m *QueryStakerInfoResponse) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StakerInfo", wireType) } @@ -4388,6 +4478,25 @@ func (m *QueryStakerInfosResponse) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StakerInfos", wireType) } @@ -4421,7 +4530,7 @@ func (m *QueryStakerInfosResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) } diff --git a/x/oracle/types/types.go b/x/oracle/types/types.go index cc6c0a18a..a93d980f6 100644 --- a/x/oracle/types/types.go +++ b/x/oracle/types/types.go @@ -4,6 +4,7 @@ import ( "encoding/binary" sdkmath "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" ) type OracleInfo struct { @@ -43,19 +44,56 @@ type AggFinalPrice struct { Price string } +type NSTType string + const ( + NSTIDPrefix = "nst_" + ETHChain NSTType = "eth" + SOLANAChain NSTType = "solana" + + ETHMainnetChainID = "0x7595" + ETHLocalnetChainID = "0x65" + ETHHoleskyChainID = "0x9d19" + ETHSepoliaChainID = "0x9ce1" + NSTETHAssetAddr = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + DefaultPriceValue = 1 DefaultPriceDecimal = 0 - NSTIDPrefix = "NST" - MaxPageLimit = 100 + + SourceChainlinkName = "Chainlink" + SourceChainlinkID = 1 + TimeLayout = "2006-01-02 15:04:05" + + DelimiterForCombinedKey = byte('/') ) -var DelimiterForCombinedKey = byte('/') +var ( + NSTChains = map[NSTType][]string{ + ETHChain: {ETHMainnetChainID, ETHLocalnetChainID, ETHHoleskyChainID, ETHSepoliaChainID}, + } + NSTChainsInverted = map[string]NSTType{ + ETHMainnetChainID: ETHChain, + ETHLocalnetChainID: ETHChain, + ETHHoleskyChainID: ETHChain, + ETHSepoliaChainID: ETHChain, + } + NSTAssetAddr = map[NSTType]string{ + ETHChain: NSTETHAssetAddr, + } +) func Uint64Bytes(value uint64) []byte { valueBytes := make([]byte, 8) binary.BigEndian.PutUint64(valueBytes, value) return valueBytes } + +func ConsAddrStrFromCreator(creator string) (string, error) { + accAddress, err := sdk.AccAddressFromBech32(creator) + if err != nil { + return "", err + } + return sdk.ConsAddress(accAddress).String(), nil +}
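
Reviewer note (illustrative only, not part of the patch): the hunks above add a Version field to the staker query responses (readable through the generated GetVersion accessors) and introduce the NST chain/asset maps plus ConsAddrStrFromCreator in x/oracle/types. The sketch below shows how those new helpers might be exercised together. exampleNSTLookup is a hypothetical function name, and the snippet is written as if it lived inside the types package itself so that no module import path has to be assumed.

package types

import "fmt"

// exampleNSTLookup (hypothetical, illustrative only): resolves the NST asset
// address for a chain ID using the maps added in this patch, and derives the
// consensus-address string for a creator via ConsAddrStrFromCreator.
func exampleNSTLookup(creator, chainID string) (assetAddr, consAddr string, err error) {
	// e.g. ETHHoleskyChainID ("0x9d19") maps to ETHChain in NSTChainsInverted.
	nstType, ok := NSTChainsInverted[chainID]
	if !ok {
		return "", "", fmt.Errorf("chain %s is not a registered NST chain", chainID)
	}
	// ETHChain maps to NSTETHAssetAddr in NSTAssetAddr.
	assetAddr = NSTAssetAddr[nstType]

	// Derive the consensus-address string from the bech32 account address,
	// exactly as the new ConsAddrStrFromCreator helper does.
	consAddr, err = ConsAddrStrFromCreator(creator)
	if err != nil {
		return "", "", err
	}
	return assetAddr, consAddr, nil
}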