diff --git a/.dockerignore b/.dockerignore index bd59680c27..709a6dacb4 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,5 +8,7 @@ /smart_contracts/ethereum/solidity_firefly/cache /firefly /firefly-nocgo +/docs/vendor +/docs/_site coverage.txt codecov.yml diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 8b26c4dbc3..139064f6a6 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -18,9 +18,9 @@ jobs: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.18 + go-version: 1.21 - name: Build and Test run: make @@ -36,9 +36,9 @@ jobs: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.18 + go-version: 1.21 - name: Build Docker image run: make docker @@ -117,7 +117,7 @@ jobs: - stack-type: ethereum blockchain-connector: ethconnect - test-suite: TestEthereumGatewayE2ESuite + test-suite: TestEthereumGatewayLegacyEthE2ESuite database-type: sqlite3 token-provider: erc1155 multiparty-enabled: false @@ -128,9 +128,9 @@ jobs: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.18 + go-version: 1.21 - name: Download Docker image uses: actions/download-artifact@v3 diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index ad456657ba..8d3252bde8 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -52,9 +52,9 @@ jobs: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.18 + go-version: 1.21 - name: Update manifest to latest commit for every service run: ./manifestgen.sh head @@ -89,9 +89,9 @@ jobs: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.18 + go-version: 1.21 - name: Update manifest to latest commit for every service run: ./manifestgen.sh head diff --git a/.github/workflows/solidity.yml b/.github/workflows/solidity.yml index f93c68cbe9..ebb7d21c8a 100644 --- a/.github/workflows/solidity.yml +++ b/.github/workflows/solidity.yml @@ -8,11 +8,11 @@ jobs: solidity-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Use Node.js uses: actions/setup-node@v2 with: - node-version: '16.x' + node-version: "16.x" - run: npm ci working-directory: ./smart_contracts/ethereum/solidity_firefly - run: npm run test diff --git a/.golangci.yml b/.golangci.yml index 551df33fb3..e5221806bc 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,6 +3,7 @@ run: skip-dirs: - "mocks" - "ffconfig" + - "test/e2e" linters-settings: golint: {} gocritic: @@ -12,7 +13,10 @@ linters-settings: revive: rules: - name: unused-parameter - disabled: true + disabled: true + gosec: + excludes: + - G601 # Appears not to handle taking an address of a sub-structure, within a pointer to a structure within a loop. Which is valid and safe. 
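The `G601` exclusion added above deserves a concrete illustration. Below is a minimal Go sketch (with hypothetical `Message`/`Header` types, not FireFly's own) of the pattern the comment describes: taking the address of a field reached through a pointer held by the range variable. gosec can report this as implicit memory aliasing, but because the range variable is itself a pointer, the address refers to the underlying heap object rather than the loop variable, so the code is valid and safe.

```go
package main

import "fmt"

type Header struct {
	ID string
}

type Message struct {
	Header Header
}

// collectIDs returns pointers to the ID field of each message. Because msg is
// already a *Message, &msg.Header.ID points into the heap-allocated Message,
// not at the loop variable, so reusing msg on the next iteration is harmless.
func collectIDs(msgs []*Message) []*string {
	ids := make([]*string, 0, len(msgs))
	for _, msg := range msgs {
		ids = append(ids, &msg.Header.ID) // the kind of line G601 can flag
	}
	return ids
}

func main() {
	msgs := []*Message{
		{Header: Header{ID: "msg-1"}},
		{Header: Header{ID: "msg-2"}},
	}
	for _, id := range collectIDs(msgs) {
		fmt.Println(*id)
	}
}
```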
goheader: values: regexp: @@ -38,7 +42,6 @@ linters: disable: - structcheck enable: - - depguard - dogsled - errcheck - goconst diff --git a/.vscode/settings.json b/.vscode/settings.json index 4be21622e9..80cdf3b7bb 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -54,6 +54,7 @@ "mytag", "NATS", "nextpins", + "openapi", "opid", "optype", "opupdate", diff --git a/CODEOWNERS b/CODEOWNERS index 1598c39d1f..d6c9ce4e77 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,7 +1,3 @@ # SPDX-License-Identifier: Apache-2.0 -# FireFly Core Maintainers -* @peterbroadhurst @nguyer @awrichar @shorsher - -# FireFly Documentation Maintainers -/docs @peterbroadhurst @nguyer @awrichar @shorsher @nickgaski \ No newline at end of file +* @hyperledger/firefly-core-maintainers diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 2e07f068fc..aa40d41329 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,4 +1,4 @@ -# Maintainers +# firefly-core-maintainers The following is the list of current maintainers this repo: @@ -9,45 +9,6 @@ The following is the list of current maintainers this repo: | Andrew Richardson | awrichar | andrew.richardson@kaleido.io | Andrew.Richardson | | Alex Shorsher | shorsher | alex.shorsher@kaleido.io | shorsher | - This list is to be kept up to date as maintainers are added or removed. -# Expectations of Maintainers - -Maintainers are expected to regularly: - -- Make contributions to FireFly code repositories including code or documentation -- Review pull requests -- Investigate open GitHub issues -- Participate in Community Calls - -# Becoming a Maintainer - -The FireFly Project welcomes and encourages people to become maintainers of the project if they are interested and meet the following criteria: - -## Criteria for becoming a member - -- Expressed interest and commitment to meet the expectations of a maintainer for at least 6 months -- A consistent track record of contributions to FireFly code repositories which could be: - - Enhancements - - Bug fixes - - Tests - - Documentation -- A consistent track record of helpful code reviews on FireFly code repositories -- Regular participation in Community Calls -- A demonstrated interest and aptitude in thought leadership within the FireFly Project -- Sponsorship from an existing maintainer - -There is no specific quantity of contributions or pull requests, or a specific time period over which the candidate must prove their track record. This will be left up to the discretion of the existing maintainers. - -## Process for becoming a maintainer - -Once the above criteria have been met, the sponsoring maintainer shall propose the addition of the new maintainer at a public Community Call. Existing maintainers shall vote at the next public Community Call whether the new maintainer should be added or not. Proxy votes may be submitted via email _before_ the meeting. A simple majority of the existing maintainers is required for the vote to pass. - -## Maintainer resignation - -While maintainers are expected in good faith to be committed to the project for a significant period of time, they are under no binding obligation to do so. Maintainers may resign at any time for any reason. If a maintainer wishes to resign they shall open a pull request to update the maintainers list removing themselves from the list. 
- -## Maintainer inactivity - -If a maintainer has remained inactive (not meeting the expectations of a maintainer) for a period of time (at least several months), an email should be sent to that maintainer noting their inactivity and asking if they still wish to be a maintainer. If they continue to be inactive after being notified via email, an existing maintainer may propose to remove the inactive maintainer at a public Community Call. Existing maintainers shall vote at the next public Community Call whether the inactive maintainer should be removed or not. Proxy votes may be submitted via email _before_ the meeting. A simple majority of the existing maintainers is required for the vote to pass. +For the full list of maintainers across all repos, the expectations of a maintainer and the process for becoming a maintainer, please see the [FireFly Maintainers page on the Hyperledger Wiki](https://wiki.hyperledger.org/display/FIR/Maintainers). diff --git a/Makefile b/Makefile index 7bab3d0dda..dc8f69c084 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,7 @@ lint: ${LINT} ${MOCKERY}: $(VGO) install github.com/vektra/mockery/v2@latest ${LINT}: - $(VGO) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.47.3 + $(VGO) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.0 ffcommon: $(eval WSCLIENT_PATH := $(shell $(VGO) list -f '{{.Dir}}' github.com/hyperledger/firefly-common/pkg/wsclient)) @@ -52,6 +52,7 @@ $(eval $(call makemock, pkg/dataexchange, Callbacks, dataex $(eval $(call makemock, pkg/tokens, Plugin, tokenmocks)) $(eval $(call makemock, pkg/tokens, Callbacks, tokenmocks)) $(eval $(call makemock, internal/txcommon, Helper, txcommonmocks)) +$(eval $(call makemock, internal/txwriter, Writer, txwritermocks)) $(eval $(call makemock, internal/identity, Manager, identitymanagermocks)) $(eval $(call makemock, internal/syncasync, Sender, syncasyncmocks)) $(eval $(call makemock, internal/syncasync, Bridge, syncasyncmocks)) @@ -72,12 +73,13 @@ $(eval $(call makemock, internal/assets, Manager, assetm $(eval $(call makemock, internal/contracts, Manager, contractmocks)) $(eval $(call makemock, internal/spievents, Manager, spieventsmocks)) $(eval $(call makemock, internal/orchestrator, Orchestrator, orchestratormocks)) -$(eval $(call makemock, internal/apiserver, FFISwaggerGen, apiservermocks)) -$(eval $(call makemock, internal/apiserver, Server, apiservermocks)) $(eval $(call makemock, internal/cache, Manager, cachemocks)) $(eval $(call makemock, internal/metrics, Manager, metricsmocks)) $(eval $(call makemock, internal/operations, Manager, operationmocks)) $(eval $(call makemock, internal/multiparty, Manager, multipartymocks)) +$(eval $(call makemock, internal/apiserver, FFISwaggerGen, apiservermocks)) +$(eval $(call makemock, internal/apiserver, Server, apiservermocks)) +$(eval $(call makemock, internal/events/websockets, WebSocketsNamespaced, websocketsmocks)) firefly-nocgo: ${GOFILES} CGO_ENABLED=0 $(VGO) build -o ${BINARY_NAME}-nocgo -ldflags "-X main.buildDate=$(DATE) -X main.buildVersion=$(BUILD_VERSION) -X 'github.com/hyperledger/firefly/cmd.BuildVersionOverride=$(BUILD_VERSION)' -X 'github.com/hyperledger/firefly/cmd.BuildDate=$(DATE)' -X 'github.com/hyperledger/firefly/cmd.BuildCommit=$(GIT_REF)'" -tags=prod -tags=prod -v diff --git a/README.md b/README.md index b2fdb1f681..341b26f518 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ [![Go Report 
Card](https://goreportcard.com/badge/github.com/hyperledger/firefly)](https://goreportcard.com/report/github.com/hyperledger/firefly) [![FireFy Documentation](https://img.shields.io/static/v1?label=FireFly&message=documentation&color=informational)](https://hyperledger.github.io/firefly//) ![build](https://github.com/hyperledger/firefly/actions/workflows/docker_main.yml/badge.svg?branch=main) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/7826/badge)](https://www.bestpractices.dev/projects/7826) ![Hyperledger FireFly](./images/hyperledger_firefly_logo.png) @@ -26,7 +27,7 @@ FireFly CLI | FireFly Explorer UI | FireFly Sa ## Engage with the community -- [Join us on Discord](https://discord.gg/Fy7MJuqw86) +- [Join us on Discord](https://discord.gg/hyperledger) ## Technical architecture diff --git a/db/migrations/postgres/000109_fix_tokentransfer_index.down.sql b/db/migrations/postgres/000109_fix_tokentransfer_index.down.sql new file mode 100644 index 0000000000..a08e72a53c --- /dev/null +++ b/db/migrations/postgres/000109_fix_tokentransfer_index.down.sql @@ -0,0 +1 @@ +-- no down migration \ No newline at end of file diff --git a/db/migrations/postgres/000109_fix_tokentransfer_index.up.sql b/db/migrations/postgres/000109_fix_tokentransfer_index.up.sql new file mode 100644 index 0000000000..e9322e0766 --- /dev/null +++ b/db/migrations/postgres/000109_fix_tokentransfer_index.up.sql @@ -0,0 +1,7 @@ +BEGIN; +DROP INDEX tokentransfer_protocolid; +DROP INDEX tokenapproval_protocolid; + +CREATE UNIQUE INDEX tokentransfer_protocolid ON tokentransfer(namespace, connector, protocol_id); +CREATE UNIQUE INDEX tokenapproval_protocolid ON tokenapproval(namespace, connector, protocol_id); +COMMIT; diff --git a/db/migrations/postgres/000110_add_tokenpool_networkname.down.sql b/db/migrations/postgres/000110_add_tokenpool_networkname.down.sql new file mode 100644 index 0000000000..b3e1df0606 --- /dev/null +++ b/db/migrations/postgres/000110_add_tokenpool_networkname.down.sql @@ -0,0 +1,6 @@ +BEGIN; +DROP INDEX tokenpool_networkname; +ALTER TABLE tokenpool DROP COLUMN published; +ALTER TABLE tokenpool DROP COLUMN network_name; +ALTER TABLE tokenpool DROP COLUMN plugin_data; +COMMIT; diff --git a/db/migrations/postgres/000110_add_tokenpool_networkname.up.sql b/db/migrations/postgres/000110_add_tokenpool_networkname.up.sql new file mode 100644 index 0000000000..1b8d107331 --- /dev/null +++ b/db/migrations/postgres/000110_add_tokenpool_networkname.up.sql @@ -0,0 +1,12 @@ +BEGIN; +ALTER TABLE tokenpool ADD COLUMN published BOOLEAN DEFAULT false; +UPDATE tokenpool SET published = true WHERE message_id IS NOT NULL; + +ALTER TABLE tokenpool ADD COLUMN network_name VARCHAR(64); +UPDATE tokenpool SET network_name = name WHERE message_id IS NOT NULL; + +ALTER TABLE tokenpool ADD COLUMN plugin_data TEXT; +UPDATE tokenpool SET plugin_data = namespace; + +CREATE UNIQUE INDEX tokenpool_networkname ON tokenpool(namespace,network_name); +COMMIT; diff --git a/db/migrations/postgres/000111_adjust_token_indexes.down.sql b/db/migrations/postgres/000111_adjust_token_indexes.down.sql new file mode 100644 index 0000000000..158779db78 --- /dev/null +++ b/db/migrations/postgres/000111_adjust_token_indexes.down.sql @@ -0,0 +1,8 @@ +BEGIN; +DROP INDEX tokentransfer_protocolid; +DROP INDEX tokenapproval_protocolid; + +CREATE UNIQUE INDEX tokentransfer_protocolid ON tokentransfer(namespace, connector, protocol_id); +CREATE UNIQUE INDEX tokenapproval_protocolid ON tokenapproval(namespace, connector, protocol_id); +CREATE 
INDEX tokenpool_locator ON tokenpool(namespace, connector, locator); +COMMIT; diff --git a/db/migrations/postgres/000111_adjust_token_indexes.up.sql b/db/migrations/postgres/000111_adjust_token_indexes.up.sql new file mode 100644 index 0000000000..6bf7c59e09 --- /dev/null +++ b/db/migrations/postgres/000111_adjust_token_indexes.up.sql @@ -0,0 +1,8 @@ +BEGIN; +DROP INDEX tokenpool_locator; +DROP INDEX tokentransfer_protocolid; +DROP INDEX tokenapproval_protocolid; + +CREATE UNIQUE INDEX tokentransfer_protocolid ON tokentransfer(namespace, pool_id, protocol_id); +CREATE UNIQUE INDEX tokenapproval_protocolid ON tokenapproval(namespace, pool_id, protocol_id); +COMMIT; diff --git a/db/migrations/postgres/000112_add_ffi_networkname.down.sql b/db/migrations/postgres/000112_add_ffi_networkname.down.sql new file mode 100644 index 0000000000..1f4f4248ba --- /dev/null +++ b/db/migrations/postgres/000112_add_ffi_networkname.down.sql @@ -0,0 +1,5 @@ +BEGIN; +DROP INDEX ffi_networkname; +ALTER TABLE ffi DROP COLUMN published; +ALTER TABLE ffi DROP COLUMN network_name; +COMMIT; diff --git a/db/migrations/postgres/000112_add_ffi_networkname.up.sql b/db/migrations/postgres/000112_add_ffi_networkname.up.sql new file mode 100644 index 0000000000..e4136a0954 --- /dev/null +++ b/db/migrations/postgres/000112_add_ffi_networkname.up.sql @@ -0,0 +1,7 @@ +BEGIN; +ALTER TABLE ffi ADD COLUMN published BOOLEAN DEFAULT false; +UPDATE ffi SET published = true WHERE message_id IS NOT NULL; +ALTER TABLE ffi ADD COLUMN network_name VARCHAR(64); +UPDATE ffi SET network_name = name WHERE message_id IS NOT NULL; +CREATE UNIQUE INDEX ffi_networkname ON ffi(namespace,network_name,version); +COMMIT; diff --git a/db/migrations/postgres/000113_add_contract_api_networkname.down.sql b/db/migrations/postgres/000113_add_contract_api_networkname.down.sql new file mode 100644 index 0000000000..6501e2f292 --- /dev/null +++ b/db/migrations/postgres/000113_add_contract_api_networkname.down.sql @@ -0,0 +1,5 @@ +BEGIN; +DROP INDEX contractapis_networkname; +ALTER TABLE contractapis DROP COLUMN published; +ALTER TABLE contractapis DROP COLUMN network_name; +COMMIT; diff --git a/db/migrations/postgres/000113_add_contract_api_networkname.up.sql b/db/migrations/postgres/000113_add_contract_api_networkname.up.sql new file mode 100644 index 0000000000..ea78a5b67d --- /dev/null +++ b/db/migrations/postgres/000113_add_contract_api_networkname.up.sql @@ -0,0 +1,7 @@ +BEGIN; +ALTER TABLE contractapis ADD COLUMN published BOOLEAN DEFAULT false; +UPDATE contractapis SET published = true WHERE message_id IS NOT NULL; +ALTER TABLE contractapis ADD COLUMN network_name VARCHAR(64); +UPDATE contractapis SET network_name = name WHERE message_id IS NOT NULL; +CREATE UNIQUE INDEX contractapis_networkname ON contractapis(namespace,network_name); +COMMIT; diff --git a/db/migrations/postgres/000114_add_message_reject_reason.down.sql b/db/migrations/postgres/000114_add_message_reject_reason.down.sql new file mode 100644 index 0000000000..6a13fe6b7f --- /dev/null +++ b/db/migrations/postgres/000114_add_message_reject_reason.down.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE messages DROP COLUMN reject_reason; +COMMIT; diff --git a/db/migrations/postgres/000114_add_message_reject_reason.up.sql b/db/migrations/postgres/000114_add_message_reject_reason.up.sql new file mode 100644 index 0000000000..85458056e3 --- /dev/null +++ b/db/migrations/postgres/000114_add_message_reject_reason.up.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE messages ADD COLUMN reject_reason TEXT DEFAULT 
''; +COMMIT; diff --git a/db/migrations/postgres/000115_rename_tokenpool_state.down.sql b/db/migrations/postgres/000115_rename_tokenpool_state.down.sql new file mode 100644 index 0000000000..34c5abd63e --- /dev/null +++ b/db/migrations/postgres/000115_rename_tokenpool_state.down.sql @@ -0,0 +1,5 @@ +BEGIN; +ALTER TABLE tokenpool ADD COLUMN state VARCHAR(64); +UPDATE tokenpool SET state = (CASE WHEN active = true THEN 'confirmed' ELSE 'pending' END); +ALTER TABLE tokenpool DROP COLUMN active; +COMMIT; diff --git a/db/migrations/postgres/000115_rename_tokenpool_state.up.sql b/db/migrations/postgres/000115_rename_tokenpool_state.up.sql new file mode 100644 index 0000000000..4d0d832136 --- /dev/null +++ b/db/migrations/postgres/000115_rename_tokenpool_state.up.sql @@ -0,0 +1,5 @@ +BEGIN; +ALTER TABLE tokenpool ADD COLUMN active BOOLEAN; +UPDATE tokenpool SET active = (CASE WHEN state = 'confirmed' THEN true ELSE false END); +ALTER TABLE tokenpool DROP COLUMN state; +COMMIT; diff --git a/db/migrations/sqlite/000109_fix_tokentransfer_index.down.sql b/db/migrations/sqlite/000109_fix_tokentransfer_index.down.sql new file mode 100644 index 0000000000..a08e72a53c --- /dev/null +++ b/db/migrations/sqlite/000109_fix_tokentransfer_index.down.sql @@ -0,0 +1 @@ +-- no down migration \ No newline at end of file diff --git a/db/migrations/sqlite/000109_fix_tokentransfer_index.up.sql b/db/migrations/sqlite/000109_fix_tokentransfer_index.up.sql new file mode 100644 index 0000000000..1a3d771082 --- /dev/null +++ b/db/migrations/sqlite/000109_fix_tokentransfer_index.up.sql @@ -0,0 +1,5 @@ +DROP INDEX tokentransfer_protocolid; +DROP INDEX tokenapproval_protocolid; + +CREATE UNIQUE INDEX tokentransfer_protocolid ON tokentransfer(namespace, connector, protocol_id); +CREATE UNIQUE INDEX tokenapproval_protocolid ON tokenapproval(namespace, connector, protocol_id); diff --git a/db/migrations/sqlite/000110_add_tokenpool_networkname.down.sql b/db/migrations/sqlite/000110_add_tokenpool_networkname.down.sql new file mode 100644 index 0000000000..9991e95c10 --- /dev/null +++ b/db/migrations/sqlite/000110_add_tokenpool_networkname.down.sql @@ -0,0 +1,4 @@ +DROP INDEX tokenpool_networkname; +ALTER TABLE tokenpool DROP COLUMN published; +ALTER TABLE tokenpool DROP COLUMN network_name; +ALTER TABLE tokenpool DROP COLUMN plugin_data; diff --git a/db/migrations/sqlite/000110_add_tokenpool_networkname.up.sql b/db/migrations/sqlite/000110_add_tokenpool_networkname.up.sql new file mode 100644 index 0000000000..6b2fda0bba --- /dev/null +++ b/db/migrations/sqlite/000110_add_tokenpool_networkname.up.sql @@ -0,0 +1,10 @@ +ALTER TABLE tokenpool ADD COLUMN published BOOLEAN DEFAULT false; +UPDATE tokenpool SET published = true WHERE message_id IS NOT NULL; + +ALTER TABLE tokenpool ADD COLUMN network_name VARCHAR(64); +UPDATE tokenpool SET network_name = name WHERE message_id IS NOT NULL; + +ALTER TABLE tokenpool ADD COLUMN plugin_data TEXT; +UPDATE tokenpool SET plugin_data = namespace; + +CREATE UNIQUE INDEX tokenpool_networkname ON tokenpool(namespace,network_name); diff --git a/db/migrations/sqlite/000111_adjust_token_indexes.down.sql b/db/migrations/sqlite/000111_adjust_token_indexes.down.sql new file mode 100644 index 0000000000..505924d03c --- /dev/null +++ b/db/migrations/sqlite/000111_adjust_token_indexes.down.sql @@ -0,0 +1,6 @@ +DROP INDEX tokentransfer_protocolid; +DROP INDEX tokenapproval_protocolid; + +CREATE UNIQUE INDEX tokentransfer_protocolid ON tokentransfer(namespace, connector, protocol_id); +CREATE UNIQUE INDEX 
tokenapproval_protocolid ON tokenapproval(namespace, connector, protocol_id); +CREATE INDEX tokenpool_locator ON tokenpool(namespace, connector, locator); diff --git a/db/migrations/sqlite/000111_adjust_token_indexes.up.sql b/db/migrations/sqlite/000111_adjust_token_indexes.up.sql new file mode 100644 index 0000000000..c83bfeeff6 --- /dev/null +++ b/db/migrations/sqlite/000111_adjust_token_indexes.up.sql @@ -0,0 +1,6 @@ +DROP INDEX tokenpool_locator; +DROP INDEX tokentransfer_protocolid; +DROP INDEX tokenapproval_protocolid; + +CREATE UNIQUE INDEX tokentransfer_protocolid ON tokentransfer(namespace, pool_id, protocol_id); +CREATE UNIQUE INDEX tokenapproval_protocolid ON tokenapproval(namespace, pool_id, protocol_id); diff --git a/db/migrations/sqlite/000112_add_ffi_networkname.down.sql b/db/migrations/sqlite/000112_add_ffi_networkname.down.sql new file mode 100644 index 0000000000..c3e86401e1 --- /dev/null +++ b/db/migrations/sqlite/000112_add_ffi_networkname.down.sql @@ -0,0 +1,3 @@ +DROP INDEX ffi_networkname; +ALTER TABLE ffi DROP COLUMN published; +ALTER TABLE ffi DROP COLUMN network_name; diff --git a/db/migrations/sqlite/000112_add_ffi_networkname.up.sql b/db/migrations/sqlite/000112_add_ffi_networkname.up.sql new file mode 100644 index 0000000000..292ab579d6 --- /dev/null +++ b/db/migrations/sqlite/000112_add_ffi_networkname.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE ffi ADD COLUMN published BOOLEAN DEFAULT false; +UPDATE ffi SET published = true WHERE message_id IS NOT NULL; +ALTER TABLE ffi ADD COLUMN network_name VARCHAR(64); +UPDATE ffi SET network_name = name WHERE message_id IS NOT NULL; +CREATE UNIQUE INDEX ffi_networkname ON ffi(namespace,network_name,version); diff --git a/db/migrations/sqlite/000113_add_contract_api_networkname.down.sql b/db/migrations/sqlite/000113_add_contract_api_networkname.down.sql new file mode 100644 index 0000000000..63dddbc757 --- /dev/null +++ b/db/migrations/sqlite/000113_add_contract_api_networkname.down.sql @@ -0,0 +1,3 @@ +DROP INDEX contractapis_networkname; +ALTER TABLE contractapis DROP COLUMN published; +ALTER TABLE contractapis DROP COLUMN network_name; diff --git a/db/migrations/sqlite/000113_add_contract_api_networkname.up.sql b/db/migrations/sqlite/000113_add_contract_api_networkname.up.sql new file mode 100644 index 0000000000..5060797270 --- /dev/null +++ b/db/migrations/sqlite/000113_add_contract_api_networkname.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE contractapis ADD COLUMN published BOOLEAN DEFAULT false; +UPDATE contractapis SET published = true WHERE message_id IS NOT NULL; +ALTER TABLE contractapis ADD COLUMN network_name VARCHAR(64); +UPDATE contractapis SET network_name = name WHERE message_id IS NOT NULL; +CREATE UNIQUE INDEX contractapis_networkname ON contractapis(namespace,network_name); diff --git a/db/migrations/sqlite/000114_add_message_reject_reason.down.sql b/db/migrations/sqlite/000114_add_message_reject_reason.down.sql new file mode 100644 index 0000000000..be3647343a --- /dev/null +++ b/db/migrations/sqlite/000114_add_message_reject_reason.down.sql @@ -0,0 +1 @@ +ALTER TABLE messages DROP COLUMN reject_reason; diff --git a/db/migrations/sqlite/000114_add_message_reject_reason.up.sql b/db/migrations/sqlite/000114_add_message_reject_reason.up.sql new file mode 100644 index 0000000000..38b5e4bfa0 --- /dev/null +++ b/db/migrations/sqlite/000114_add_message_reject_reason.up.sql @@ -0,0 +1 @@ +ALTER TABLE messages ADD COLUMN reject_reason TEXT DEFAULT ''; diff --git a/db/migrations/sqlite/000115_rename_tokenpool_state.down.sql 
b/db/migrations/sqlite/000115_rename_tokenpool_state.down.sql new file mode 100644 index 0000000000..0ebd61afd4 --- /dev/null +++ b/db/migrations/sqlite/000115_rename_tokenpool_state.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE tokenpool ADD COLUMN state VARCHAR(64); +UPDATE tokenpool SET state = (CASE WHEN active = true THEN 'confirmed' ELSE 'pending' END); +ALTER TABLE tokenpool DROP COLUMN active; diff --git a/db/migrations/sqlite/000115_rename_tokenpool_state.up.sql b/db/migrations/sqlite/000115_rename_tokenpool_state.up.sql new file mode 100644 index 0000000000..bf6e4fef5a --- /dev/null +++ b/db/migrations/sqlite/000115_rename_tokenpool_state.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE tokenpool ADD COLUMN active BOOLEAN; +UPDATE tokenpool SET active = (CASE WHEN state = 'confirmed' THEN true ELSE false END); +ALTER TABLE tokenpool DROP COLUMN state; diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index d591f9fa60..48ad7373e9 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -281,4 +281,4 @@ DEPENDENCIES webrick BUNDLED WITH - 2.3.14 + 2.3.14 \ No newline at end of file diff --git a/docs/_i18n/en.yml b/docs/_i18n/en.yml index 3d1c2b0eb7..501860cc66 100644 --- a/docs/_i18n/en.yml +++ b/docs/_i18n/en.yml @@ -47,6 +47,7 @@ pages: remote_fabric_network: Remote Fabric Network release_notes: Release Notes security: Security + tls: TLS tools: Tools tutorials: Tutorials understanding_firefly: Understanding FireFly @@ -56,4 +57,5 @@ pages: fabric_test_network: Fabric-Samples Test Network xdc_testnet: XDC Testnet zksync_testnet: zkSync Testnet - rotate_dx_certs: Rotate Data Exchange Certificates \ No newline at end of file + rotate_dx_certs: Rotate Data Exchange Certificates + tezos_testnet: Tezos Testnet diff --git a/docs/architecture/blockchain_connector_framework.md b/docs/architecture/blockchain_connector_framework.md index 055a26bbfc..20d81adb69 100644 --- a/docs/architecture/blockchain_connector_framework.md +++ b/docs/architecture/blockchain_connector_framework.md @@ -5,7 +5,7 @@ parent: pages.architecture nav_order: 2 --- -# Multiparty Event Sequencing +# Blockchain Connector Toolkit {: .no_toc } ## Table of contents diff --git a/docs/architecture/ping_pong_txflow.md b/docs/architecture/ping_pong_txflow.md index 2470150cf1..3660b46e8a 100644 --- a/docs/architecture/ping_pong_txflow.md +++ b/docs/architecture/ping_pong_txflow.md @@ -67,7 +67,7 @@ This is deliberately a simple flow, and all kinds of additional layers might wel ## Authorize & Transfer Data (Member 1) -- Inpsect the request data +- Inspect the request data - Retrieve data asset by hash - Send the private data in a private message - No blockchain in this flow diff --git a/docs/config_docs_generate_test.go b/docs/config_docs_generate_test.go index c9855b995f..78b30a971e 100644 --- a/docs/config_docs_generate_test.go +++ b/docs/config_docs_generate_test.go @@ -27,12 +27,14 @@ import ( "github.com/hyperledger/firefly-common/pkg/config" "github.com/hyperledger/firefly/internal/apiserver" + "github.com/hyperledger/firefly/internal/coreconfig" "github.com/hyperledger/firefly/internal/namespace" "github.com/stretchr/testify/assert" ) func TestGenerateConfigDocs(t *testing.T) { // Initialize config of all plugins + coreconfig.Reset() namespace.InitConfig() apiserver.InitConfig() f, err := os.Create(filepath.Join("reference", "config.md")) diff --git a/docs/config_docs_test.go b/docs/config_docs_test.go index c63edcbea6..1198788da0 100644 --- a/docs/config_docs_test.go +++ b/docs/config_docs_test.go @@ -28,12 +28,14 @@ import ( 
"github.com/hyperledger/firefly-common/pkg/config" "github.com/hyperledger/firefly/internal/apiserver" + "github.com/hyperledger/firefly/internal/coreconfig" "github.com/hyperledger/firefly/internal/namespace" "github.com/stretchr/testify/assert" ) func TestConfigDocsUpToDate(t *testing.T) { // Initialize config of all plugins + coreconfig.Reset() namespace.InitConfig() apiserver.InitConfig() generatedConfig, err := config.GenerateConfigMarkdown(context.Background(), configDocHeader, config.GetKnownKeys()) diff --git a/docs/contributors/dev_environment_setup.md b/docs/contributors/dev_environment_setup.md index 350ab95582..254496bcc6 100644 --- a/docs/contributors/dev_environment_setup.md +++ b/docs/contributors/dev_environment_setup.md @@ -23,7 +23,7 @@ This guide will walk you through setting up your machine for contributing to Fir You will need a few prerequisites set up on your machine before you can build FireFly from source. We recommend doing development on macOS, Linux, or WSL 2.0. -- [Go 1.18](https://golang.org/dl/) +- [Go 1.21](https://golang.org/dl/) - make - GCC - openssl diff --git a/docs/contributors/index.md b/docs/contributors/index.md index 137f0b74a0..eb2cd12651 100644 --- a/docs/contributors/index.md +++ b/docs/contributors/index.md @@ -24,9 +24,9 @@ We welcome anyone to contribute to the FireFly project! If you're interested, th ## 🚀 Connect with us on Discord You can chat with maintainers and other contributors on Discord in the `firefly` channel: -[https://discord.gg/hyperledger](https://discord.gg/nnQw2aGhX6) +[https://discord.gg/hyperledger](https://discord.gg/hyperledger) -[Join Discord Server](https://discord.gg/nnQw2aGhX6){: .btn .btn-purple .mb-5} +[Join Discord Server](https://discord.gg/hyperledger){: .btn .btn-purple .mb-5} ## 📅 Join our Community Calls Community calls are a place to talk to other contributors, maintainers, and other people interested in FireFly. Maintainers often discuss upcoming changes and proposed new features on these calls. These calls are a great way for the community to give feedback on new ideas, ask questions about FireFly, and hear how others are using FireFly to solve real world problems. @@ -95,4 +95,4 @@ The Hyperledger Foundation and the FireFly project are committed to fostering a - Consider that users who will read the docs are from different background and cultures and that they have different preferences. - Avoid potential offensive terms and, for instance, prefer "allow list and deny list" to "white list and black list". - We believe that we all have a role to play to improve our world, and even if writing inclusive doc might not look like a huge improvement, it's a first step in the right direction. -- We suggest to refer to Microsoft bias free writing guidelines and Google inclusive doc writing guide as starting points. \ No newline at end of file +- We suggest to refer to Microsoft bias free writing guidelines and Google inclusive doc writing guide as starting points. diff --git a/docs/faqs/index.md b/docs/faqs/index.md index 54d2cdc119..94b69b9d18 100644 --- a/docs/faqs/index.md +++ b/docs/faqs/index.md @@ -27,4 +27,4 @@ Yes! 
Before you set up MetaMask you'll likely want to create some tokens that yo ## 🚀 Connect with us on Discord -If your question isn't answered here or if you have immediate questions please don't hesitate to reach out to us on [Discord](https://discord.gg/hyperledger_) in the `firefly` channel: +If your question isn't answered here or if you have immediate questions please don't hesitate to reach out to us on [Discord](https://discord.gg/hyperledger) in the `firefly` channel: diff --git a/docs/gettingstarted/index.md b/docs/gettingstarted/index.md index eec559e2ac..b342697a62 100644 --- a/docs/gettingstarted/index.md +++ b/docs/gettingstarted/index.md @@ -23,9 +23,9 @@ With this easy-to-follow guide, you'll go from "zero" to blockchain-hero in the ![FireFly Sandbox](../images/sandbox/sandbox_broadcast.png) ### We're here to help! -We want to make it as easy as possible for anyone to get started with FireFly, and we don't want anyone to feel like they're stuck. If you're having trouble, or are just curious about what else you can do with FireFly we encourage you to [join the Hyperledger Discord server](https://discord.gg/Fy7MJuqw86) and come chat with us in the #firefly channel. +We want to make it as easy as possible for anyone to get started with FireFly, and we don't want anyone to feel like they're stuck. If you're having trouble, or are just curious about what else you can do with FireFly we encourage you to [join the Hyperledger Discord server](https://discord.gg/hyperledger) and come chat with us in the #firefly channel. ## Get started: Install the FireFly CLI Now that you've got the FireFly CLI set up on your machine, the next step is to create and start a FireFly stack. -[① Install the FireFly CLI →](firefly_cli.md){: .btn .btn-purple .float-right .mb-5} \ No newline at end of file +[① Install the FireFly CLI →](firefly_cli.md){: .btn .btn-purple .float-right .mb-5} diff --git a/docs/gettingstarted/sandbox.md b/docs/gettingstarted/sandbox.md index fe7f83a514..8224bf5aeb 100644 --- a/docs/gettingstarted/sandbox.md +++ b/docs/gettingstarted/sandbox.md @@ -47,7 +47,7 @@ The FireFly explorer is a part of FireFly Core itself. It is a view into the sys When you set up your FireFly stack in the previous section, it should have printed some URLs like the following. Open the link in a browser for the `Sandbox UI for member '0'. It should be: [http://127.0.0.1:5109](http://127.0.0.1:5109) ``` -ff start demo +ff start dev this will take a few seconds longer since this is the first time you're running this stack... done @@ -63,7 +63,7 @@ Sandbox UI for member '2': http://127.0.0.1:5309 To see logs for your stack run: -ff logs demo +ff logs dev ``` @@ -126,4 +126,4 @@ The Contracts section of the Sandbox lets you interact with custom smart contrac - Use the Swagger UI to call a smart contract function that emits an event. Verify that the event is received in the Sandbox and shows up in the FireFly Explorer. ## Go forth and build! -At this point you should have a pretty good understanding of some of the major features of Hyperledger FireFly. Now, using what you've learned, you can go and build your own Web3 app! Don't forget to [join the Hyperledger Discord server](https://discord.gg/Fy7MJuqw86) and come chat with us in the #firefly channel. \ No newline at end of file +At this point you should have a pretty good understanding of some of the major features of Hyperledger FireFly. Now, using what you've learned, you can go and build your own Web3 app! 
Don't forget to [join the Hyperledger Discord server](https://discord.gg/hyperledger) and come chat with us in the #firefly channel. diff --git a/docs/gettingstarted/setup_env.md b/docs/gettingstarted/setup_env.md index 027e02d3e2..2495cfeb61 100644 --- a/docs/gettingstarted/setup_env.md +++ b/docs/gettingstarted/setup_env.md @@ -124,6 +124,8 @@ This may take a minute or two and in the background the FireFly CLI will do the - Deploy an `ERC-1155` token smart contract - Register an identity for each member and node +> **NOTE**: For macOS users, the default port (5000) is already in-use by `ControlCe` service (AirPlay Receiver). You can either [disable this service](https://support.apple.com/guide/mac-help/change-airdrop-handoff-settings-mchl6a407f99/13.0/mac/13.0) in your environment, or use a different port when creating your stack (e.g. `ff init dev -p 8000`) + After your stack finishes starting it will print out the links to each member's UI and the Sandbox for that node: ``` diff --git a/docs/images/idempotency_keys_architecture.jpg b/docs/images/idempotency_keys_architecture.jpg new file mode 100644 index 0000000000..fdb885dc96 Binary files /dev/null and b/docs/images/idempotency_keys_architecture.jpg differ diff --git a/docs/overview/key_components/security.md b/docs/overview/key_components/security.md index 0214b67953..6a70accd38 100644 --- a/docs/overview/key_components/security.md +++ b/docs/overview/key_components/security.md @@ -59,4 +59,4 @@ See the reference implementation > message queue based reliable delivery of messages, hub-and-spoke connectivity models, chunking > of very large file payloads, and end-to-end encryption. -Learn more about these private data flows in [Multiparty Process Flows](../multiparty/multiparty_flow.md). \ No newline at end of file +Learn more about these private data flows in [Multiparty Process Flows](../multiparty/multiparty_flow.md). diff --git a/docs/reference/api_query_syntax.md b/docs/reference/api_query_syntax.md index 79355f4f97..d74b2515e5 100644 --- a/docs/reference/api_query_syntax.md +++ b/docs/reference/api_query_syntax.md @@ -66,11 +66,13 @@ perform against the match string. Modifiers can appear before the operator, to change its behavior. -| Modifier | Description | -|----------|------------------------------------| -| `!` | Not - negates the match | -| `:` | Case insensitive | -| `?` | Treat empty match string as null | +| Modifier | Description | +|----------|------------------------------------------------| +| `!` | Not - negates the match | +| `:` | Case insensitive | +| `?` | Treat empty match string as null | +| `[` | Combine using `AND` on the same field | +| `]` | Combine using `OR` on the same field (default) | ## Detailed examples @@ -88,3 +90,17 @@ behavior. 
| `!$-cat` | Does not end with "-cat" | | `?=` | Is null | | `!?=` | Is not null | + +## Time range example + +For this case we need to combine multiple queries on the same `created` +field using AND semantics (with the `[` modifier): + +``` +?created=[>>2021-01-01T00:00:00Z&created=[<=2021-01-02T00:00:00Z +``` + +So this means: +- `created` greater than `2021-01-01T00:00:00Z` +- `AND` +- `created` less than or equal to `2021-01-02T00:00:00Z` \ No newline at end of file diff --git a/docs/reference/config.md b/docs/reference/config.md index 4a63b53153..fbb937f574 100644 --- a/docs/reference/config.md +++ b/docs/reference/config.md @@ -27,141 +27,156 @@ nav_order: 2 |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|defaultFilterLimit|The maximum number of rows to return if no limit is specified on an API request|`int`|`` -|maxFilterLimit|The largest value of `limit` that an HTTP client can specify in a request|`int`|`` -|passthroughHeaders|A list of HTTP request headers to pass through to dependency microservices|`[]string`|`` -|requestMaxTimeout|The maximum amount of time that an HTTP client can specify in a `Request-Timeout` header to keep a specific request open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|defaultFilterLimit|The maximum number of rows to return if no limit is specified on an API request|`int`|`25` +|dynamicPublicURLHeader|Dynamic header that informs the backend the base public URL for the request, in order to build URL links in OpenAPI/SwaggerUI|`string`|`` +|maxFilterLimit|The largest value of `limit` that an HTTP client can specify in a request|`int`|`1000` +|passthroughHeaders|A list of HTTP request headers to pass through to dependency microservices|`[]string`|`[]` +|requestMaxTimeout|The maximum amount of time that an HTTP client can specify in a `Request-Timeout` header to keep a specific request open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`10m` +|requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`120s` ## asset.manager |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|keyNormalization|Mechanism to normalize keys before using them. Valid options are `blockchain_plugin` - use blockchain plugin (default) or `none` - do not attempt normalization (deprecated - use namespaces.predefined[].asset.manager.keyNormalization)|`string`|`` +|keyNormalization|Mechanism to normalize keys before using them.
Valid options are `blockchain_plugin` - use blockchain plugin (default) or `none` - do not attempt normalization (deprecated - use namespaces.predefined[].asset.manager.keyNormalization)|`string`|`blockchain_plugin` ## batch.manager |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|minimumPollDelay|The minimum time the batch manager waits between polls on the DB - to prevent thrashing|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|pollTimeout|How long to wait without any notifications of new messages before doing a page query|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|readPageSize|The size of each page of messages read from the database into memory when assembling batches|`int`|`` +|minimumPollDelay|The minimum time the batch manager waits between polls on the DB - to prevent thrashing|[`time.Duration`](https://pkg.go.dev/time#Duration)|`100ms` +|pollTimeout|How long to wait without any notifications of new messages before doing a page query|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` +|readPageSize|The size of each page of messages read from the database into memory when assembling batches|`int`|`100` ## batch.retry |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|factor|The retry backoff factor|`float32`|`` -|initDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|factor|The retry backoff factor|`float32`|`2` +|initDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` +|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` ## blobreceiver.retry |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|factor|The retry backoff factor|`float32`|`` -|initialDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|factor|The retry backoff factor|`float32`|`2` +|initialDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` +|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` ## blobreceiver.worker |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|batchMaxInserts|The maximum number of items the blob receiver worker will insert in a batch|`int`|`` -|batchTimeout|The maximum amount of the the blob receiver worker will wait|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|count|The number of blob receiver workers|`int`|`` +|batchMaxInserts|The maximum number of items the blob receiver worker will insert in a batch|`int`|`200` +|batchTimeout|The maximum amount of the the blob receiver worker will wait|[`time.Duration`](https://pkg.go.dev/time#Duration)|`50ms` +|count|The number of blob receiver workers|`int`|`5` ## broadcast.batch |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|agentTimeout|How long to keep around a batching agent for a sending identity before disposal|`string`|`` -|payloadLimit|The maximum payload size of a batch for broadcast messages|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` -|size|The maximum number of messages that can be packed into a batch|`int`|`` -|timeout|The timeout to wait for a batch to fill, before sending|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|agentTimeout|How long 
to keep around a batching agent for a sending identity before disposal|`string`|`2m` +|payloadLimit|The maximum payload size of a batch for broadcast messages|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`800Kb` +|size|The maximum number of messages that can be packed into a batch|`int`|`200` +|timeout|The timeout to wait for a batch to fill, before sending|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` ## cache |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|enabled|Enables caching, defaults to true|`boolean`|`` +|enabled|Enables caching, defaults to true|`boolean`|`true` ## cache.addressresolver |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|limit|Max number of cached items for address resolver|`int`|`` -|ttl|Time to live of cached items for address resolver|`string`|`` +|limit|Max number of cached items for address resolver|`int`|`1000` +|ttl|Time to live of cached items for address resolver|`string`|`24h` ## cache.batch |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|limit|Max number of cached items for batches|`int`|`` -|ttl|Time to live of cache items for batches|`string`|`` +|limit|Max number of cached items for batches|`int`|`100` +|ttl|Time to live of cache items for batches|`string`|`5m` ## cache.blockchain |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|limit|Max number of cached items for blockchain|`int`|`` -|ttl|Time to live of cached items for blockchain|`string`|`` +|limit|Max number of cached items for blockchain|`int`|`100` +|ttl|Time to live of cached items for blockchain|`string`|`5m` ## cache.blockchainevent |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|limit|Max number of cached blockchain events for transactions|`int`|`` -|ttl|Time to live of cached blockchain events for transactions|`string`|`` +|limit|Max number of cached blockchain events for transactions|`int`|`1000` +|ttl|Time to live of cached blockchain events for transactions|`string`|`5m` ## cache.eventlistenertopic |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|limit|Max number of cached items for blockchain listener topics|`int`|`` -|ttl|Time to live of cached items for blockchain listener topics|`string`|`` +|limit|Max number of cached items for blockchain listener topics|`int`|`100` +|ttl|Time to live of cached items for blockchain listener topics|`string`|`5m` ## cache.group |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|limit|Max number of cached items for groups|`int`|`` -|ttl|Time to live of cached items for groups|`string`|`` +|limit|Max number of cached items for groups|`int`|`50` +|ttl|Time to live of cached items for groups|`string`|`1h` ## cache.identity |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|limit|Max number of cached identities for identity manager|`int`|`` -|ttl|Time to live of cached identities for identity manager|`string`|`` +|limit|Max number of cached identities for identity manager|`int`|`100` +|ttl|Time to live of cached identities for identity manager|`string`|`1h` ## cache.message |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|size|Max size of cached messages for data manager|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` -|ttl|Time to live of cached messages for data manager|`string`|`` +|size|Max size of cached messages for data 
manager|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`50Mb` +|ttl|Time to live of cached messages for data manager|`string`|`5m` + +## cache.methods + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|limit|Max number of cached items for schema validations on blockchain methods|`int`|`200` +|ttl|Time to live of cached items for schema validations on blockchain methods|`string`|`5m` ## cache.operations |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|limit|Max number of cached items for operations|`int`|`` -|ttl|Time to live of cached items for operations|`string`|`` +|limit|Max number of cached items for operations|`int`|`1000` +|ttl|Time to live of cached items for operations|`string`|`5m` + +## cache.tokenpool + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|limit|Max number of cached items for token pools|`int`|`100` +|ttl|Time to live of cached items for token pool|`string`|`1h` ## cache.transaction |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|size|Max size of cached transactions|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` -|ttl|Time to live of cached transactions|`string`|`` +|size|Max size of cached transactions|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`1Mb` +|ttl|Time to live of cached transactions|`string`|`5m` ## cache.validator |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|size|Max size of cached validators for data manager|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` -|ttl|Time to live of cached validators for data manager|`string`|`` +|size|Max size of cached validators for data manager|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`1Mb` +|ttl|Time to live of cached validators for data manager|`string`|`1h` ## config @@ -185,58 +200,58 @@ nav_order: 2 |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|address|The HTTP interface the go debugger binds to|`string`|`` -|port|An HTTP port on which to enable the go debugger|`int`|`` +|address|The HTTP interface the go debugger binds to|`string`|`localhost` +|port|An HTTP port on which to enable the go debugger|`int`|`-1` ## download.retry |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|factor|The retry backoff factor|`float32`|`` -|initialDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|maxAttempts|The maximum number attempts|`int`|`` -|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|factor|The retry backoff factor|`float32`|`2` +|initialDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`100ms` +|maxAttempts|The maximum number attempts|`int`|`100` +|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` ## download.worker |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|count|The number of download workers|`int`|`` +|count|The number of download workers|`int`|`10` |queueLength|The length of the work queue in the channel to the workers - defaults to 2x the worker count|`int`|`` ## event.aggregator |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|batchSize|The maximum number of records to read from the DB before performing an aggregation 
run|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` -|batchTimeout|How long to wait for new events to arrive before performing aggregation on a page of events|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|firstEvent|The first event the aggregator should process, if no previous offest is stored in the DB. Valid options are `oldest` or `newest`|`string`|`` -|pollTimeout|The time to wait without a notification of new events, before trying a select on the table|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|rewindQueryLimit|Safety limit on the maximum number of records to search when performing queries to search for rewinds|`int`|`` -|rewindQueueLength|The size of the queue into the rewind dispatcher|`int`|`` -|rewindTimeout|The minimum time to wait for rewinds to accumulate before resolving them|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|batchSize|The maximum number of records to read from the DB before performing an aggregation run|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`200` +|batchTimeout|How long to wait for new events to arrive before performing aggregation on a page of events|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` +|firstEvent|The first event the aggregator should process, if no previous offest is stored in the DB. Valid options are `oldest` or `newest`|`string`|`oldest` +|pollTimeout|The time to wait without a notification of new events, before trying a select on the table|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` +|rewindQueryLimit|Safety limit on the maximum number of records to search when performing queries to search for rewinds|`int`|`1000` +|rewindQueueLength|The size of the queue into the rewind dispatcher|`int`|`10` +|rewindTimeout|The minimum time to wait for rewinds to accumulate before resolving them|[`time.Duration`](https://pkg.go.dev/time#Duration)|`50ms` ## event.aggregator.retry |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|factor|The retry backoff factor|`float32`|`` -|initDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|factor|The retry backoff factor|`float32`|`2` +|initDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`100ms` +|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` ## event.dbevents |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|bufferSize|The size of the buffer of change events|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` +|bufferSize|The size of the buffer of change events|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`100` ## event.dispatcher |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|batchTimeout|A short time to wait for new events to arrive before re-polling for new events|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|bufferLength|The number of events + attachments an individual dispatcher should hold in memory ready for delivery to the subscription|`int`|`` -|pollTimeout|The time to wait without a notification of new events, before trying a select on the table|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|batchTimeout|A short time to wait for new events to arrive before re-polling for new events|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` 
+|bufferLength|The number of events + attachments an individual dispatcher should hold in memory ready for delivery to the subscription|`int`|`5` +|pollTimeout|The time to wait without a notification of new events, before trying a select on the table|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` ## event.dispatcher.retry @@ -250,8 +265,8 @@ nav_order: 2 |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|default|The default event transport for new subscriptions|`string`|`` -|enabled|Which event interface plugins are enabled|`boolean`|`` +|default|The default event transport for new subscriptions|`string`|`websockets` +|enabled|Which event interface plugins are enabled|`boolean`|`[websockets webhooks]` ## events.webhooks @@ -261,6 +276,7 @@ nav_order: 2 |expectContinueTimeout|See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` |headers|Adds custom headers to HTTP requests|`map[string]string`|`` |idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` +|maxConnsPerHost|The max number of connections, per unique hostname. Zero means no limit|`int`|`0` |maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` |passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` |requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -285,6 +301,7 @@ nav_order: 2 |---|-----------|----|-------------| |count|The maximum number of times to retry|`int`|`5` |enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` |initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` |maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -296,7 +313,9 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## events.websockets @@ -309,7 +328,7 @@ nav_order: 2 |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|maxChartRows|The maximum rows to fetch for each histogram bucket|`int`|`` +|maxChartRows|The maximum rows to fetch for each histogram bucket|`int`|`100` ## http @@ -342,7 +361,9 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. 
Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## log @@ -350,39 +371,39 @@ nav_order: 2 |---|-----------|----|-------------| |compress|Determines if the rotated log files should be compressed using gzip|`boolean`|`` |filename|Filename is the file to write logs to. Backup log files will be retained in the same directory|`string`|`` -|filesize|MaxSize is the maximum size the log file before it gets rotated|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` +|filesize|MaxSize is the maximum size the log file before it gets rotated|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`100m` |forceColor|Force color to be enabled, even when a non-TTY output is detected|`boolean`|`` -|includeCodeInfo|Enables the report caller for including the calling file and line number, and the calling function. If using text logs, it uses the logrus text format rather than the default prefix format.|`boolean`|`` -|level|The log level - error, warn, info, debug, trace|`string`|`` -|maxAge|The maximum time to retain old log files based on the timestamp encoded in their filename|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|maxBackups|Maximum number of old log files to retain|`int`|`` +|includeCodeInfo|Enables the report caller for including the calling file and line number, and the calling function. If using text logs, it uses the logrus text format rather than the default prefix format.|`boolean`|`false` +|level|The log level - error, warn, info, debug, trace|`string`|`info` +|maxAge|The maximum time to retain old log files based on the timestamp encoded in their filename|[`time.Duration`](https://pkg.go.dev/time#Duration)|`24h` +|maxBackups|Maximum number of old log files to retain|`int`|`2` |noColor|Force color to be disabled, event when TTY output is detected|`boolean`|`` -|timeFormat|Custom time format for logs|[Time format](https://pkg.go.dev/time#pkg-constants) `string`|`` -|utc|Use UTC timestamps for logs|`boolean`|`` +|timeFormat|Custom time format for logs|[Time format](https://pkg.go.dev/time#pkg-constants) `string`|`2006-01-02T15:04:05.000Z07:00` +|utc|Use UTC timestamps for logs|`boolean`|`false` ## log.json |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|enabled|Enables JSON formatted logs rather than text. All log color settings are ignored when enabled.|`boolean`|`` +|enabled|Enables JSON formatted logs rather than text. 
All log color settings are ignored when enabled.|`boolean`|`false` ## log.json.fields |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|file|configures the JSON key containing the calling file|`string`|`` -|func|Configures the JSON key containing the calling function|`string`|`` -|level|Configures the JSON key containing the log level|`string`|`` -|message|Configures the JSON key containing the log message|`string`|`` -|timestamp|Configures the JSON key containing the timestamp of the log|`string`|`` +|file|configures the JSON key containing the calling file|`string`|`file` +|func|Configures the JSON key containing the calling function|`string`|`func` +|level|Configures the JSON key containing the log level|`string`|`level` +|message|Configures the JSON key containing the log message|`string`|`message` +|timestamp|Configures the JSON key containing the timestamp of the log|`string`|`@timestamp` ## message.writer |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|batchMaxInserts|The maximum number of database inserts to include when writing a single batch of messages + data|`int`|`` -|batchTimeout|How long to wait for more messages to arrive before flushing the batch|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|count|The number of message writer workers|`int`|`` +|batchMaxInserts|The maximum number of database inserts to include when writing a single batch of messages + data|`int`|`200` +|batchTimeout|How long to wait for more messages to arrive before flushing the batch|[`time.Duration`](https://pkg.go.dev/time#Duration)|`10ms` +|count|The number of message writer workers|`int`|`5` ## metrics @@ -417,13 +438,15 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. 
Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## namespaces |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|default|The default namespace - must be in the predefined list|`string`|`` +|default|The default namespace - must be in the predefined list|`string`|`default` |predefined|A list of namespaces to ensure exists, without requiring a broadcast from the network|List `string`|`` ## namespaces.predefined[] @@ -471,13 +494,31 @@ nav_order: 2 |key|The signing key allocated to the root organization within this namespace|`string`|`` |name|A short name for the local root organization within this namespace|`string`|`` +## namespaces.predefined[].tlsConfigs[] + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|name|Name of the TLS Config|`string`|`` + +## namespaces.predefined[].tlsConfigs[].tls + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|caFile|The path to the CA file for TLS on this API|`string`|`` +|certFile|The path to the certificate file for TLS on this API|`string`|`` +|clientAuth|Enables or disables client auth for TLS on this API|`string`|`` +|enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` +|keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` + ## namespaces.retry |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|factor|The retry backoff factor|`float32`|`` -|initDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|factor|The retry backoff factor|`float32`|`2` +|initDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`5s` +|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` ## node @@ -490,24 +531,24 @@ nav_order: 2 |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|factor|The retry backoff factor|`float32`|`` -|initialDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|factor|The retry backoff factor|`float32`|`2` +|initialDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` +|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` ## opupdate.worker |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|batchMaxInserts|The maximum number of database inserts to include when writing a single batch of messages + data|`int`|`` -|batchTimeout|How long to wait for more messages to arrive before flushing the batch|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|count|The number of operation update works|`int`|`` -|queueLength|The size of the queue for the Operation Update worker|`int`|`` +|batchMaxInserts|The maximum number 
of database inserts to include when writing a single batch of messages + data|`int`|`200` +|batchTimeout|How long to wait for more messages to arrive before flushing the batch|[`time.Duration`](https://pkg.go.dev/time#Duration)|`50ms` +|count|The number of operation update works|`int`|`5` +|queueLength|The size of the queue for the Operation Update worker|`int`|`50` ## orchestrator |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|startupAttempts|The number of times to attempt to connect to core infrastructure on startup|`string`|`` +|startupAttempts|The number of times to attempt to connect to core infrastructure on startup|`string`|`5` ## org @@ -559,6 +600,7 @@ nav_order: 2 |expectContinueTimeout|See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` |headers|Adds custom headers to HTTP requests|`string`|`` |idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` +|maxConnsPerHost|The max number of connections, per unique hostname. Zero means no limit|`int`|`0` |maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` |method|The HTTP method to use when making requests to the Address Resolver|`string`|`GET` |passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` @@ -588,6 +630,7 @@ nav_order: 2 |---|-----------|----|-------------| |count|The maximum number of times to retry|`int`|`5` |enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` |initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` |maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -599,7 +642,9 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## plugins.blockchain[].ethereum.ethconnect @@ -613,6 +658,7 @@ nav_order: 2 |headers|Adds custom headers to HTTP requests|`map[string]string`|`` |idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` |instance|The Ethereum address of the FireFly BatchPin smart contract that has been deployed to the blockchain|Address `string`|`` +|maxConnsPerHost|The max number of connections, per unique hostname. 
Zero means no limit|`int`|`0` |maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` |passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` |prefixLong|The prefix that will be used for Ethconnect specific HTTP headers when FireFly makes requests to Ethconnect|`string`|`firefly` @@ -629,6 +675,15 @@ nav_order: 2 |password|Password|`string`|`` |username|Username|`string`|`` +## plugins.blockchain[].ethereum.ethconnect.backgroundStart + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|enabled|Start the Ethconnect plugin in the background and enter retry loop if failed to start|`boolean`|`` +|factor|Set the factor by which the delay increases when retrying|`float32`|`2` +|initialDelay|Delay between restarts in the case where we retry to restart the ethereum plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`5s` +|maxDelay|Max delay between restarts in the case where we retry to restart the ethereum plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` + ## plugins.blockchain[].ethereum.ethconnect.proxy |Key|Description|Type|Default Value| @@ -641,6 +696,7 @@ nav_order: 2 |---|-----------|----|-------------| |count|The maximum number of times to retry|`int`|`5` |enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` |initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` |maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -652,16 +708,20 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. 
Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## plugins.blockchain[].ethereum.ethconnect.ws |Key|Description|Type|Default Value| |---|-----------|----|-------------| +|connectionTimeout|The amount of time to wait while establishing a connection (or auto-reconnection)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`45s` |heartbeatInterval|The amount of time to wait between heartbeat signals on the WebSocket connection|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` |initialConnectAttempts|The number of attempts FireFly will make to connect to the WebSocket when starting up, before failing|`int`|`5` |path|The WebSocket sever URL to which FireFly should connect|WebSocket URL `string`|`` |readBufferSize|The size in bytes of the read buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` +|url|URL to use for WebSocket - overrides url one level up (in the HTTP config)|`string`|`` |writeBufferSize|The size in bytes of the write buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` ## plugins.blockchain[].ethereum.fftm @@ -672,6 +732,7 @@ nav_order: 2 |expectContinueTimeout|See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` |headers|Adds custom headers to HTTP requests|`map[string]string`|`` |idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` +|maxConnsPerHost|The max number of connections, per unique hostname. Zero means no limit|`int`|`0` |maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` |passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` |requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -697,6 +758,7 @@ nav_order: 2 |---|-----------|----|-------------| |count|The maximum number of times to retry|`int`|`5` |enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` |initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` |maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -708,7 +770,9 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. 
Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## plugins.blockchain[].fabric.fabconnect @@ -722,6 +786,7 @@ nav_order: 2 |expectContinueTimeout|See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` |headers|Adds custom headers to HTTP requests|`map[string]string`|`` |idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` +|maxConnsPerHost|The max number of connections, per unique hostname. Zero means no limit|`int`|`0` |maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` |passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` |prefixLong|The prefix that will be used for Fabconnect specific HTTP headers when FireFly makes requests to Fabconnect|`string`|`firefly` @@ -739,6 +804,15 @@ nav_order: 2 |password|Password|`string`|`` |username|Username|`string`|`` +## plugins.blockchain[].fabric.fabconnect.backgroundStart + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|enabled|Start the fabric plugin in the background and enter retry loop if failed to start|`boolean`|`` +|factor|Set the factor by which the delay increases when retrying|`float32`|`2` +|initialDelay|Delay between restarts in the case where we retry to restart the fabric plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`5s` +|maxDelay|Max delay between restarts in the case where we retry to restart the fabric plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` + ## plugins.blockchain[].fabric.fabconnect.proxy |Key|Description|Type|Default Value| @@ -751,6 +825,7 @@ nav_order: 2 |---|-----------|----|-------------| |count|The maximum number of times to retry|`int`|`5` |enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` |initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` |maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -762,16 +837,152 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. 
Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## plugins.blockchain[].fabric.fabconnect.ws |Key|Description|Type|Default Value| |---|-----------|----|-------------| +|connectionTimeout|The amount of time to wait while establishing a connection (or auto-reconnection)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`45s` |heartbeatInterval|The amount of time to wait between heartbeat signals on the WebSocket connection|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` |initialConnectAttempts|The number of attempts FireFly will make to connect to the WebSocket when starting up, before failing|`int`|`5` |path|The WebSocket sever URL to which FireFly should connect|WebSocket URL `string`|`` |readBufferSize|The size in bytes of the read buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` +|url|URL to use for WebSocket - overrides url one level up (in the HTTP config)|`string`|`` +|writeBufferSize|The size in bytes of the write buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` + +## plugins.blockchain[].tezos.addressResolver + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|alwaysResolve|Causes the address resolver to be invoked on every API call that submits a signing key. Also disables any result caching|`boolean`|`` +|bodyTemplate|The body go template string to use when making HTTP requests|[Go Template](https://pkg.go.dev/text/template) `string`|`` +|connectionTimeout|The maximum amount of time that a connection is allowed to remain with no data transmitted|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` +|expectContinueTimeout|See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` +|headers|Adds custom headers to HTTP requests|`map[string]string`|`` +|idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` +|maxConnsPerHost|The max number of connections, per unique hostname. Zero means no limit|`int`|`0` +|maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` +|method|The HTTP method to use when making requests to the Address Resolver|`string`|`GET` +|passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` +|requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` +|responseField|The name of a JSON field that is provided in the response, that contains the tezos address (default `address`)|`string`|`address` +|retainOriginal|When true the original pre-resolved string is retained after the lookup, and passed down to Tezosconnect as the from address|`boolean`|`` +|tlsHandshakeTimeout|The maximum amount of time to wait for a successful TLS handshake|[`time.Duration`](https://pkg.go.dev/time#Duration)|`10s` +|url|The URL of the Address Resolver|`string`|`` +|urlTemplate|The URL Go template string to use when calling the Address Resolver. 
The template input contains '.Key' and '.Intent' string variables.|[Go Template](https://pkg.go.dev/text/template) `string`|`` + +## plugins.blockchain[].tezos.addressResolver.auth + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|password|Password|`string`|`` +|username|Username|`string`|`` + +## plugins.blockchain[].tezos.addressResolver.proxy + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|url|Optional HTTP proxy server to connect through|`string`|`` + +## plugins.blockchain[].tezos.addressResolver.retry + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|count|The maximum number of times to retry|`int`|`5` +|enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` +|initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` +|maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` + +## plugins.blockchain[].tezos.addressResolver.tls + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|caFile|The path to the CA file for TLS on this API|`string`|`` +|certFile|The path to the certificate file for TLS on this API|`string`|`` +|clientAuth|Enables or disables client auth for TLS on this API|`string`|`` +|enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` +|keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` + +## plugins.blockchain[].tezos.tezosconnect + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|batchSize|The number of events Tezosconnect should batch together for delivery to FireFly core. Only applies when automatically creating a new event stream|`int`|`50` +|batchTimeout|How long Tezosconnect should wait for new events to arrive and fill a batch, before sending the batch to FireFly core. Only applies when automatically creating a new event stream|[`time.Duration`](https://pkg.go.dev/time#Duration)|`500` +|connectionTimeout|The maximum amount of time that a connection is allowed to remain with no data transmitted|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` +|expectContinueTimeout|See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` +|headers|Adds custom headers to HTTP requests|`map[string]string`|`` +|idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` +|maxConnsPerHost|The max number of connections, per unique hostname. 
Zero means no limit|`int`|`0` +|maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` +|passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` +|prefixLong|The prefix that will be used for Tezosconnect specific HTTP headers when FireFly makes requests to Tezosconnect|`string`|`firefly` +|prefixShort|The prefix that will be used for Tezosconnect specific query parameters when FireFly makes requests to Tezosconnect|`string`|`fly` +|requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` +|tlsHandshakeTimeout|The maximum amount of time to wait for a successful TLS handshake|[`time.Duration`](https://pkg.go.dev/time#Duration)|`10s` +|topic|The websocket listen topic that the node should register on, which is important if there are multiple nodes using a single tezosconnect|`string`|`` +|url|The URL of the Tezosconnect instance|URL `string`|`` + +## plugins.blockchain[].tezos.tezosconnect.auth + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|password|Password|`string`|`` +|username|Username|`string`|`` + +## plugins.blockchain[].tezos.tezosconnect.backgroundStart + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|enabled|Start the Tezosconnect plugin in the background and enter retry loop if failed to start|`boolean`|`` +|factor|Set the factor by which the delay increases when retrying|`float32`|`2` +|initialDelay|Delay between restarts in the case where we retry to restart the tezos plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`5s` +|maxDelay|Max delay between restarts in the case where we retry to restart the tezos plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` + +## plugins.blockchain[].tezos.tezosconnect.proxy + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|url|Optional HTTP proxy server to use when connecting to Tezosconnect|URL `string`|`` + +## plugins.blockchain[].tezos.tezosconnect.retry + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|count|The maximum number of times to retry|`int`|`5` +|enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` +|initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` +|maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` + +## plugins.blockchain[].tezos.tezosconnect.tls + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|caFile|The path to the CA file for TLS on this API|`string`|`` +|certFile|The path to the certificate file for TLS on this API|`string`|`` +|clientAuth|Enables or disables client auth for TLS on this API|`string`|`` +|enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` +|keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. 
Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` + +## plugins.blockchain[].tezos.tezosconnect.ws + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|connectionTimeout|The amount of time to wait while establishing a connection (or auto-reconnection)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`45s` +|heartbeatInterval|The amount of time to wait between heartbeat signals on the WebSocket connection|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` +|initialConnectAttempts|The number of attempts FireFly will make to connect to the WebSocket when starting up, before failing|`int`|`5` +|path|The WebSocket sever URL to which FireFly should connect|WebSocket URL `string`|`` +|readBufferSize|The size in bytes of the read buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` +|url|URL to use for WebSocket - overrides url one level up (in the HTTP config)|`string`|`` |writeBufferSize|The size in bytes of the write buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` ## plugins.database[] @@ -832,6 +1043,7 @@ nav_order: 2 |idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` |initEnabled|Instructs FireFly to always post all current nodes to the `/init` API before connecting or reconnecting to the connector|`boolean`|`false` |manifestEnabled|Determines whether to require+validate a manifest from other DX instances in the network. Must be supported by the connector|`string`|`false` +|maxConnsPerHost|The max number of connections, per unique hostname. 
Zero means no limit|`int`|`0` |maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` |passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` |requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -845,6 +1057,15 @@ nav_order: 2 |password|Password|`string`|`` |username|Username|`string`|`` +## plugins.dataexchange[].ffdx.backgroundStart + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|enabled|Start the data exchange plugin in the background and enter retry loop if failed to start|`boolean`|`false` +|factor|Set the factor by which the delay increases when retrying|`float32`|`2` +|initialDelay|Delay between restarts in the case where we retry to restart the data exchange plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`5s` +|maxDelay|Max delay between restarts in the case where we retry to restart the data exchange plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` + ## plugins.dataexchange[].ffdx.eventRetry |Key|Description|Type|Default Value| @@ -865,6 +1086,7 @@ nav_order: 2 |---|-----------|----|-------------| |count|The maximum number of times to retry|`int`|`5` |enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` |initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` |maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -876,16 +1098,20 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. 
Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## plugins.dataexchange[].ffdx.ws |Key|Description|Type|Default Value| |---|-----------|----|-------------| +|connectionTimeout|The amount of time to wait while establishing a connection (or auto-reconnection)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`45s` |heartbeatInterval|The amount of time to wait between heartbeat signals on the WebSocket connection|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` |initialConnectAttempts|The number of attempts FireFly will make to connect to the WebSocket when starting up, before failing|`int`|`5` |path|The WebSocket sever URL to which FireFly should connect|WebSocket URL `string`|`` |readBufferSize|The size in bytes of the read buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` +|url|URL to use for WebSocket - overrides url one level up (in the HTTP config)|`string`|`` |writeBufferSize|The size in bytes of the write buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` ## plugins.identity[] @@ -910,6 +1136,7 @@ nav_order: 2 |expectContinueTimeout|See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` |headers|Adds custom headers to HTTP requests|`map[string]string`|`` |idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` +|maxConnsPerHost|The max number of connections, per unique hostname. Zero means no limit|`int`|`0` |maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` |passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` |requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -935,6 +1162,7 @@ nav_order: 2 |---|-----------|----|-------------| |count|The maximum number of times to retry|`int`|`5` |enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` |initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` |maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -946,7 +1174,9 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. 
Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## plugins.sharedstorage[].ipfs.gateway @@ -956,6 +1186,7 @@ nav_order: 2 |expectContinueTimeout|See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` |headers|Adds custom headers to HTTP requests|`map[string]string`|`` |idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` +|maxConnsPerHost|The max number of connections, per unique hostname. Zero means no limit|`int`|`0` |maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` |passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` |requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -981,6 +1212,7 @@ nav_order: 2 |---|-----------|----|-------------| |count|The maximum number of times to retry|`int`|`5` |enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` |initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` |maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -992,7 +1224,9 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## plugins.tokens[] @@ -1010,6 +1244,7 @@ nav_order: 2 |expectContinueTimeout|See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` |headers|Adds custom headers to HTTP requests|`map[string]string`|`` |idleTimeout|The max duration to hold a HTTP keepalive connection between calls|[`time.Duration`](https://pkg.go.dev/time#Duration)|`475ms` +|maxConnsPerHost|The max number of connections, per unique hostname. 
Zero means no limit|`int`|`0` |maxIdleConns|The max number of idle connections to hold pooled|`int`|`100` |passthroughHeadersEnabled|Enable passing through the set of allowed HTTP request headers|`boolean`|`false` |requestTimeout|The maximum amount of time that a request is allowed to remain open|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -1023,6 +1258,15 @@ nav_order: 2 |password|Password|`string`|`` |username|Username|`string`|`` +## plugins.tokens[].fftokens.backgroundStart + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|enabled|Start the tokens plugin in the background and enter retry loop if failed to start|`boolean`|`false` +|factor|Set the factor by which the delay increases when retrying|`float32`|`2` +|initialDelay|Delay between restarts in the case where we retry to restart the token plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`5s` +|maxDelay|Max delay between restarts in the case where we retry to restart the token plugin|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` + ## plugins.tokens[].fftokens.eventRetry |Key|Description|Type|Default Value| @@ -1043,6 +1287,7 @@ nav_order: 2 |---|-----------|----|-------------| |count|The maximum number of times to retry|`int`|`5` |enabled|Enables retries|`boolean`|`false` +|errorStatusCodeRegex|The regex that the error response status code must match to trigger retry|`string`|`` |initWaitTime|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` |maxWaitTime|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` @@ -1054,41 +1299,45 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. 
Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## plugins.tokens[].fftokens.ws |Key|Description|Type|Default Value| |---|-----------|----|-------------| +|connectionTimeout|The amount of time to wait while establishing a connection (or auto-reconnection)|[`time.Duration`](https://pkg.go.dev/time#Duration)|`45s` |heartbeatInterval|The amount of time to wait between heartbeat signals on the WebSocket connection|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` |initialConnectAttempts|The number of attempts FireFly will make to connect to the WebSocket when starting up, before failing|`int`|`5` |path|The WebSocket sever URL to which FireFly should connect|WebSocket URL `string`|`` |readBufferSize|The size in bytes of the read buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` +|url|URL to use for WebSocket - overrides url one level up (in the HTTP config)|`string`|`` |writeBufferSize|The size in bytes of the write buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` ## privatemessaging.batch |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|agentTimeout|How long to keep around a batching agent for a sending identity before disposal|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|payloadLimit|The maximum payload size of a private message Data Exchange payload|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` -|size|The maximum number of messages in a batch for private messages|`int`|`` -|timeout|The timeout to wait for a batch to fill, before sending|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|agentTimeout|How long to keep around a batching agent for a sending identity before disposal|[`time.Duration`](https://pkg.go.dev/time#Duration)|`2m` +|payloadLimit|The maximum payload size of a private message Data Exchange payload|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`800Kb` +|size|The maximum number of messages in a batch for private messages|`int`|`200` +|timeout|The timeout to wait for a batch to fill, before sending|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1s` ## privatemessaging.retry |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|factor|The retry backoff factor|`float32`|`` -|initDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|factor|The retry backoff factor|`float32`|`2` +|initDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`100ms` +|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` ## spi |Key|Description|Type|Default Value| |---|-----------|----|-------------| |address|The IP address on which the admin HTTP API should listen|IP Address `string`|`127.0.0.1` -|enabled|Enables the admin HTTP API|`boolean`|`` +|enabled|Enables the admin HTTP API|`boolean`|`false` |port|The port on which the admin HTTP API should listen|`int`|`5001` |publicURL|The fully qualified public URL for the admin API. 
This is used for building URLs in HTTP responses and in OpenAPI Spec generation|URL `string`|`` |readTimeout|The maximum time to wait when reading from an HTTP connection|[`time.Duration`](https://pkg.go.dev/time#Duration)|`15s` @@ -1115,40 +1364,50 @@ nav_order: 2 |certFile|The path to the certificate file for TLS on this API|`string`|`` |clientAuth|Enables or disables client auth for TLS on this API|`string`|`` |enabled|Enables or disables TLS on this API|`boolean`|`false` +|insecureSkipHostVerify|When to true in unit test development environments to disable TLS verification. Use with extreme caution|`boolean`|`` |keyFile|The path to the private key file for TLS on this API|`string`|`` +|requiredDNAttributes|A set of required subject DN attributes. Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)|`map[string]string`|`` ## spi.ws |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|blockedWarnInterval|How often to log warnings in core, when an admin change event listener falls behind the stream they requested and misses events|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|eventQueueLength|Server-side queue length for events waiting for delivery over an admin change event listener websocket|`int`|`` -|readBufferSize|The size in bytes of the read buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` -|writeBufferSize|The size in bytes of the write buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`` +|blockedWarnInterval|How often to log warnings in core, when an admin change event listener falls behind the stream they requested and misses events|[`time.Duration`](https://pkg.go.dev/time#Duration)|`1m` +|eventQueueLength|Server-side queue length for events waiting for delivery over an admin change event listener websocket|`int`|`250` +|readBufferSize|The size in bytes of the read buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` +|writeBufferSize|The size in bytes of the write buffer for the WebSocket connection|[`BytesSize`](https://pkg.go.dev/github.com/docker/go-units#BytesSize)|`16Kb` ## subscription |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|max|The maximum number of pre-defined subscriptions that can exist (note for high fan-out consider connecting a dedicated pub/sub broker to the dispatcher)|`int`|`` +|max|The maximum number of pre-defined subscriptions that can exist (note for high fan-out consider connecting a dedicated pub/sub broker to the dispatcher)|`int`|`500` ## subscription.defaults |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|batchSize|Default read ahead to enable for subscriptions that do not explicitly configure readahead|`int`|`` +|batchSize|Default read ahead to enable for subscriptions that do not explicitly configure readahead|`int`|`0` ## subscription.retry |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|factor|The retry backoff factor|`float32`|`` -|initDelay|The initial retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` -|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`` +|factor|The retry backoff factor|`float32`|`2` +|initDelay|The initial retry 
delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`250ms` +|maxDelay|The maximum retry delay|[`time.Duration`](https://pkg.go.dev/time#Duration)|`30s` + +## transaction.writer + +|Key|Description|Type|Default Value| +|---|-----------|----|-------------| +|batchMaxTransactions|The maximum number of transaction inserts to include in a batch|`int`|`100` +|batchTimeout|How long to wait for more transactions to arrive before flushing the batch|[`time.Duration`](https://pkg.go.dev/time#Duration)|`10ms` +|count|The number of message writer workers|`int`|`5` ## ui |Key|Description|Type|Default Value| |---|-----------|----|-------------| -|enabled|Enables the web user interface|`boolean`|`` +|enabled|Enables the web user interface|`boolean`|`true` |path|The file system path which contains the static HTML, CSS, and JavaScript files for the user interface|`string`|`` \ No newline at end of file diff --git a/docs/reference/idempotency.md b/docs/reference/idempotency.md new file mode 100644 index 0000000000..b8470e507c --- /dev/null +++ b/docs/reference/idempotency.md @@ -0,0 +1,93 @@ +--- +layout: default +title: Idempotency Keys +parent: pages.reference +nav_order: 10 +--- + +# Idempotency Keys +{: .no_toc } + +## Table of contents +{: .no_toc .text-delta } + +1. TOC +{:toc} + +--- + +## Idempotency + +The transaction submission REST APIs of Hyperledger FireFly are idempotent. + +Idempotent APIs allow an application to safely submit a request multiple times, and for the transaction +to only be accepted and executed once. + +This is the well accepted approach for REST APIs over HTTP/HTTPS to achieve resilience, as HTTP requests +can fail in indeterminate ways. For example in a request or gateway timeout situation, the requester is +unable to know whether the request will or will not eventually be processed. + +There are various types of FireFly [transaction](../reference/types/transaction.html) that can be submitted. +These include direct submission of blockchain transactions to a smart contract, as well as more complex +transactions including coordination of multiple [operations](../reference/types/operation.html) +across on-chain and off-chain connectors. + +In order for Hyperledger FireFly to deduplicate transactions, and make them idempotent, the application +must supply an `idempotencyKey` on each API request. + +## FireFly Idempotency Keys + +[![Idempotency Keys Architecture](../images/idempotency_keys_architecture.jpg "Idempotency Keys Architecture")](../images/idempotency_keys_architecture.jpg) + +The caller of the API specifies its own unique identifier (an arbitrary string up to 256 characters) +that uniquely identifies the request, in the `idempotencyKey` field of the API. + +So if there is a network connectivity failure, or an abrupt termination of either runtime, the application +can safely attempt to resubmit the REST API call and be returned a `409 Conflict` HTTP code. + +Examples of how an app might construct such an idempotencyKey include: +- Unique business identifiers from the request that comes into its API up-stream - passing idempotency along the chain +- A hash of the business unique data that relates to the request - maybe all the input data of a blockchain transaction for example, if that payload is guaranteed to be unique. + > Be careful of cases where the business data might _not_ be unique - like a transfer of 10 coins from A to B. + > + > Such a transfer could happen multiple times, and each would be a separate business transaction. 
+ > + > Whereas a transfer with invoice number `abcd1234` of 10 coins from A to B would be assured to be unique. +- A unique identifier of a business transaction generated within the application and stored in its database before submission + > This moves the challenge up one layer into your application. How does that unique ID get generated? Is that + > itself idempotent? + +## Operation Idempotency + +FireFly provides an idempotent interface downstream to connectors. + +Each [operation](../reference/types/operation.html) within a FireFly [transaction](../reference/types/transaction.html) +receives a unique ID within the overall transaction that is used as an idempotency key when invoking that connector. + +Well-formed connectors honor this idempotency key internally, ensuring that the end-to-end transaction submission is +idempotent. + +Key examples of such connectors are EVMConnect and others built on the +[Blockchain Connector Toolkit](../architecture/blockchain_connector_framework.html). + +When an operation is retried automatically, the same idempotency key is re-used to avoid resubmission. + +## Short-term retry + +The FireFly core uses standard HTTP client code to communicate with all connector APIs. + +This code includes exponential backoff retry, which can be enabled with a simple boolean in the plugin +configuration of FireFly core. The minimum retry delay, maximum retry delay, and backoff factor can also be +tuned individually on each connector. + +See [Configuration Reference](../reference/config.html) for more information. + +## Administrative operation retry + +The `operations/{operationId}/retry` API can be called administratively to resubmit a +transaction that has reached `Failed` status, or otherwise been determined by an operator/monitor to be +unrecoverable within the connector. + +In this case, the previous operation is marked `Retried`, a new operation ID is allocated, and +the operation is re-submitted to the connector with this new ID. + diff --git a/docs/reference/microservices/fftokens.md b/docs/reference/microservices/fftokens.md index 6c0d27ad63..ca3ad3b8a3 100644 --- a/docs/reference/microservices/fftokens.md +++ b/docs/reference/microservices/fftokens.md @@ -79,7 +79,7 @@ _See [Response Types: Async Request](#async-request)_ ### `POST /activatepool` -Activate a token pool to begin receiving events. Generally this means the connector will create blockchain event subscriptions to transfer and approval events related to the set of tokens encompassed by this token pool. +Activate a token pool to begin receiving events. Generally this means the connector will create blockchain event listeners for transfer and approval events related to the set of tokens encompassed by this token pool. In a multiparty network, this step will be performed by every member after a successful token pool broadcast. It therefore also serves the purpose of validating the broadcast info - if the connector does not find a valid pool given the `poolLocator` and `config` information passed in to this call, the pool should not get confirmed.
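To make the idempotent submission pattern described in the new `docs/reference/idempotency.md` content above concrete, here is a minimal client-side sketch in Go. It assumes a FireFly REST endpoint of `/api/v1/namespaces/default/messages/broadcast` that accepts a top-level `idempotencyKey` field in the request body (the exact payload shape and field placement are assumptions for illustration, not taken from this diff), and it treats a `409 Conflict` on resubmission as confirmation that an earlier attempt was already accepted.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// broadcastIdempotent submits a broadcast message to FireFly with an
// application-chosen idempotencyKey, retrying on indeterminate transport
// failures. A 409 Conflict on a retry means a previous attempt was already
// accepted, so it is treated as success.
// NOTE: the endpoint path and body shape are illustrative assumptions.
func broadcastIdempotent(baseURL, idempotencyKey, value string) error {
	body, err := json.Marshal(map[string]interface{}{
		"idempotencyKey": idempotencyKey, // assumed to be a top-level request field
		"data":           []map[string]interface{}{{"value": value}},
	})
	if err != nil {
		return err
	}
	url := baseURL + "/api/v1/namespaces/default/messages/broadcast"
	for attempt := 0; attempt < 5; attempt++ {
		resp, err := http.Post(url, "application/json", bytes.NewReader(body))
		if err != nil {
			// Timeout / connection failure: the outcome is unknown, but the
			// idempotencyKey makes it safe to simply try again.
			time.Sleep(time.Duration(attempt+1) * 250 * time.Millisecond)
			continue
		}
		status := resp.StatusCode
		resp.Body.Close()
		switch {
		case status >= 200 && status < 300:
			return nil // accepted on this attempt
		case status == http.StatusConflict:
			return nil // 409: an earlier attempt was already accepted
		default:
			return fmt.Errorf("broadcast rejected with status %d", status)
		}
	}
	return fmt.Errorf("broadcast not accepted after retries")
}

func main() {
	// Derive the key from a unique business identifier, e.g. an invoice number.
	if err := broadcastIdempotent("http://127.0.0.1:5000", "invoice-abcd1234", "hello"); err != nil {
		fmt.Println("error:", err)
	}
}
```

In a real application the key would come from durable business data (such as the invoice-number example above), so that a restart of the client reuses the same key for the same business transaction.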
@@ -89,7 +89,6 @@ In a multiparty network, this step will be performed by every member after a suc
 {
   "poolLocator": "id=F1",
   "poolData": "extra-pool-info",
-  "requestId": "1",
   "config": {}
 }
 ```

@@ -98,7 +97,6 @@ In a multiparty network, this step will be performed by every member after a suc
 | ----------- | ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 | poolLocator | string | The locator of the pool, as supplied by the output of the pool creation. |
 | poolData | string | (OPTIONAL) A data string that should be permanently attached to this pool and returned in all events. |
-| requestId | string | (OPTIONAL) A unique identifier for this request. Will be included in the "receipt" websocket event to match receipts to requests. |
 | config | object | (OPTIONAL) An arbitrary JSON object where the connector may accept additional parameters if desired. This should be the same `config` object that was passed when the pool was created. |

 **Response**

@@ -115,6 +113,36 @@ HTTP 204: activation was successful - no separate receipt will be delivered, but

 _No body_

+### `POST /deactivatepool`
+
+Deactivate a token pool to stop receiving events and delete all blockchain listeners related to that pool.
+
+**Request**
+
+```
+{
+  "poolLocator": "id=F1",
+  "poolData": "extra-pool-info",
+  "config": {}
+}
+```
+
+| Parameter | Type | Description |
+| ----------- | ------ | ---------------------------------------------------------------------------------------------------- |
+| poolLocator | string | The locator of the pool, as supplied by the output of the pool creation. |
+| poolData | string | (OPTIONAL) The data string that was attached to this pool at activation. |
+| config | object | (OPTIONAL) An arbitrary JSON object where the connector may accept additional parameters if desired. |
+
+**Response**
+
+HTTP 204: deactivation was successful, and one or more listeners were deleted.
+
+_No body_
+
+HTTP 404: no blockchain listeners were found for the given pool information.
+
+_No body_
+
 ### `POST /checkinterface`

 This is an optional (but recommended) API for token connectors. If implemented, support will be indicated by
diff --git a/docs/reference/microservices/index.md b/docs/reference/microservices/index.md
index d376d479c1..fc2312715c 100644
--- a/docs/reference/microservices/index.md
+++ b/docs/reference/microservices/index.md
@@ -2,6 +2,6 @@
 layout: default
 title: Microservices
 parent: pages.reference
-nav_order: 10
+nav_order: 11
 has_children: true
 ---
diff --git a/docs/reference/tls.md b/docs/reference/tls.md
new file mode 100644
index 0000000000..b709d429ff
--- /dev/null
+++ b/docs/reference/tls.md
@@ -0,0 +1,63 @@
+---
+layout: i18n_page
+title: pages.tls
+parent: pages.reference
+nav_order: 11
+---
+
+# TLS
+{: .no_toc }
+
+## Table of contents
+{: .no_toc .text-delta }
+
+1. TOC
+{:toc}
+
+---
+
+## TLS Overview
+
+To enable TLS in FireFly, there is configuration available to provide certificates and keys.
+
+The common configuration is as follows:
+
+```yaml
+tls:
+  enabled: true/false # Toggle on or off TLS
+  caFile: <path to CA file>
+  certFile: <path to certificate file>
+  keyFile: <path to private key file>
+  clientAuth: true/false # Only applicable to the server side, to toggle on or off client authentication
+  requiredDNAttributes: A set of required subject DN attributes. Each entry is a regular expression, and the subject certificate must have a matching attribute of the specified type (CN, C, O, OU, ST, L, STREET, POSTALCODE, SERIALNUMBER are valid attributes)
+```
+
+**NOTE** The CAs, certificates, and keys must be in PEM format.
+
+## Configuring TLS for the API server
+
+Using the above configuration, we can place it under the `http` config and enable TLS or mTLS for any API call.
+
+[See this config section for details](config.html#httptls)
+
+## Configuring TLS for the webhooks
+
+Using the above configuration, we can place it under the `events.webhooks` config and enable TLS or mTLS for any webhook call.
+
+[See this config section for details](config.html#eventswebhookstls)
+
+## Configuring clients and websockets
+
+FireFly has a set of HTTP clients and websockets that communicate with external endpoints and services, and these connections can be secured using TLS.
+To configure these clients, use the same configuration as above in the sections of the config that relate to those clients.
+
+For example, if you wish to configure the Ethereum blockchain connector with TLS, look at [this config section](config.html#pluginsblockchainethereumethconnecttls)
+
+For more clients, search the [configuration reference](config.html) for a TLS section.
+
+## Enhancing validation of certificates
+
+If you want to verify that a specific client certificate has certain attributes, use the `requiredDNAttributes` configuration described above. It matches regular expressions against the well-known attributes of a distinguished name (DN). To learn more about DNs, see [this document](https://datatracker.ietf.org/doc/rfc4514/)
diff --git a/docs/reference/types/contractapi.md b/docs/reference/types/contractapi.md
index 6c812ade13..403fd79537 100644
--- a/docs/reference/types/contractapi.md
+++ b/docs/reference/types/contractapi.md
@@ -37,7 +37,8 @@ nav_order: 4
   "urls": {
     "openapi": "http://127.0.0.1:5000/api/v1/namespaces/default/apis/my_contract_api/api/swagger.json",
     "ui": "http://127.0.0.1:5000/api/v1/namespaces/default/apis/my_contract_api/api"
-  }
+  },
+  "published": false
 }
 ```

@@ -50,8 +51,10 @@ nav_order: 4
 | `interface` | Reference to the FireFly Interface definition associated with the contract API | [`FFIReference`](#ffireference) |
 | `location` | If this API is tied to an individual instance of a smart contract, this field can include a blockchain specific contract identifier.
For example an Ethereum contract address, or a Fabric chaincode name and channel | [`JSONAny`](simpletypes#jsonany) | | `name` | The name that is used in the URL to access the API | `string` | +| `networkName` | The published name of the API within the multiparty network | `string` | | `message` | The UUID of the broadcast message that was used to publish this API to the network | [`UUID`](simpletypes#uuid) | | `urls` | The URLs to use to access the API | [`ContractURLs`](#contracturls) | +| `published` | Indicates if the API is published to other members of the multiparty network | `bool` | ## FFIReference diff --git a/docs/reference/types/ffi.md b/docs/reference/types/ffi.md index c8e73176f3..01159395c5 100644 --- a/docs/reference/types/ffi.md +++ b/docs/reference/types/ffi.md @@ -109,7 +109,8 @@ nav_order: 9 } ] } - ] + ], + "published": false } ``` @@ -121,11 +122,13 @@ nav_order: 9 | `message` | The UUID of the broadcast message that was used to publish this FFI to the network | [`UUID`](simpletypes#uuid) | | `namespace` | The namespace of the FFI | `string` | | `name` | The name of the FFI - usually matching the smart contract name | `string` | +| `networkName` | The published name of the FFI within the multiparty network | `string` | | `description` | A description of the smart contract this FFI represents | `string` | | `version` | A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged | `string` | | `methods` | An array of smart contract method definitions | [`FFIMethod[]`](#ffimethod) | | `events` | An array of smart contract event definitions | [`FFIEvent[]`](#ffievent) | | `errors` | An array of smart contract error definitions | [`FFIError[]`](#ffierror) | +| `published` | Indicates if the FFI is published to other members of the multiparty network | `bool` | ## FFIMethod diff --git a/docs/reference/types/message.md b/docs/reference/types/message.md index 0d919473c6..fc1ea92c20 100644 --- a/docs/reference/types/message.md +++ b/docs/reference/types/message.md @@ -63,6 +63,7 @@ nav_order: 16 | `txid` | The ID of the transaction used to order/deliver this message | [`UUID`](simpletypes#uuid) | | `state` | The current state of the message | `FFEnum`:
`"staged"`
`"ready"`
`"sent"`
`"pending"`
`"confirmed"`
`"rejected"` |
 | `confirmed` | The timestamp of when the message was confirmed/rejected | [`FFTime`](simpletypes#fftime) |
+| `rejectReason` | If a message was rejected, provides details on the rejection reason | `string` |
 | `data` | The list of data elements attached to the message | [`DataRef[]`](#dataref) |
 | `pins` | For private messages, a unique pin hash:nonce is assigned for each topic | `string[]` |
 | `idempotencyKey` | An optional unique identifier for a message. Cannot be duplicated within a namespace, thus allowing idempotent submission of messages to the API. Local only - not transferred when the message is sent to other members of the network | `IdempotencyKey` |
diff --git a/docs/reference/types/subscription.md b/docs/reference/types/subscription.md
index c9cf23fbfe..6ee7b937f9 100644
--- a/docs/reference/types/subscription.md
+++ b/docs/reference/types/subscription.md
@@ -105,6 +105,8 @@ nav_order: 3
 | `firstEvent` | Whether your application would like to receive events from the 'oldest' event emitted by your FireFly node (from the beginning of time), or the 'newest' event (from now), or a specific event sequence. Default is 'newest' | `SubOptsFirstEvent` |
 | `readAhead` | The number of events to stream ahead to your application, while waiting for confirmation of consumption of those events. At least once delivery semantics are used in FireFly, so if your application crashes/reconnects this is the maximum number of events you would expect to be redelivered after it restarts | `uint16` |
 | `withData` | Whether message events delivered over the subscription, should be packaged with the full data of those messages in-line as part of the event JSON payload. Or if the application should make separate REST calls to download that data. May not be supported on some transports. | `bool` |
+| `batch` | Events are delivered in batches in an ordered array. The batch size is capped to the readAhead limit. The event payload is always an array even if there is a single event in the batch. Commonly used with Webhooks to allow events to be delivered and acknowledged in batches. | `bool` |
+| `batchTimeout` | When batching is enabled, an optional timeout after which events are sent even if the batch has not filled. | `string` |
 | `fastack` | Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations | `bool` |
 | `url` | Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config | `string` |
 | `method` | Webhooks only: HTTP method to invoke. Default=POST | `string` |
@@ -114,7 +116,10 @@ nav_order: 3
 | `replytx` | Webhooks only: The transaction type to set on the reply message | `string` |
 | `headers` | Webhooks only: Static headers to set on the webhook request | `` |
 | `query` | Webhooks only: Static query params to set on the webhook request | `` |
+| `tlsConfigName` | The name of an existing TLS configuration, associated with the namespace, to use | `string` |
 | `input` | Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. Only applies if withData=true | [`WebhookInputOptions`](#webhookinputoptions) |
+| `retry` | Webhooks only: a set of options for retrying the webhook call | [`WebhookRetryOptions`](#webhookretryoptions) |
+| `httpOptions` | Webhooks only: a set of HTTP options for the webhook call | [`WebhookHTTPOptions`](#webhookhttpoptions) |

 ## WebhookInputOptions

@@ -127,4 +132,27 @@ nav_order: 3
 | `replytx` | A top-level property of the first data input, to use to dynamically set whether to pin the response (so the requester can choose) | `string` |

+## WebhookRetryOptions
+
+| Field Name | Description | Type |
+|------------|-------------|------|
+| `enabled` | Enables retry on HTTP calls, defaults to false | `bool` |
+| `count` | Number of times to retry the webhook call in case of failure | `int` |
+| `initialDelay` | Initial delay between retries of the webhook call | `string` |
+| `maxDelay` | Max delay between retries of the webhook call | `string` |
+
+## WebhookHTTPOptions
+
+| Field Name | Description | Type |
+|------------|-------------|------|
+| `proxyURL` | HTTP proxy URL to use for outbound requests to the webhook | `string` |
+| `tlsHandshakeTimeout` | The max duration to hold a TLS handshake alive | `string` |
+| `requestTimeout` | The max duration to wait for the webhook HTTP request to complete | `string` |
+| `maxIdleConns` | The max number of idle connections to hold pooled | `int` |
+| `idleTimeout` | The max duration to hold an HTTP keepalive connection between calls | `string` |
+| `connectionTimeout` | The maximum amount of time that a connection is allowed to remain with no data transmitted. | `string` |
+| `expectContinueTimeout` | See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) | `string` |
+
diff --git a/docs/reference/types/tokenpool.md b/docs/reference/types/tokenpool.md
index 88b978b5cf..42099941d4 100644
--- a/docs/reference/types/tokenpool.md
+++ b/docs/reference/types/tokenpool.md
@@ -33,7 +33,7 @@ nav_order: 11
   "decimals": 18,
   "connector": "erc20_erc721",
   "message": "43923040-b1e5-4164-aa20-47636c7177ee",
-  "state": "confirmed",
+  "active": true,
   "created": "2022-05-16T01:23:15Z",
   "info": {
     "address": "0x056df1c53c3c00b0e13d37543f46930b42f71db0",
@@ -43,7 +43,8 @@ nav_order: 11
   "tx": {
     "type": "token_pool",
     "id": "a23ffc87-81a2-4cbc-97d6-f53d320c36cd"
-  }
+  },
+  "published": false
 }
 ```

@@ -55,14 +56,15 @@ nav_order: 11
 | `type` | The type of token the pool contains, such as fungible/non-fungible | `FFEnum`:
`"fungible"`
`"nonfungible"` | | `namespace` | The namespace for the token pool | `string` | | `name` | The name of the token pool. Note the name is not validated against the description of the token on the blockchain | `string` | +| `networkName` | The published name of the token pool within the multiparty network | `string` | | `standard` | The ERC standard the token pool conforms to, as reported by the token connector | `string` | | `locator` | A unique identifier for the pool, as provided by the token connector | `string` | | `key` | The signing key used to create the token pool. On input for token connectors that support on-chain deployment of new tokens (vs. only index existing ones) this determines the signing key used to create the token on-chain | `string` | | `symbol` | The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information | `string` | | `decimals` | Number of decimal places that this token has | `int` | | `connector` | The name of the token connector, as specified in the FireFly core configuration file that is responsible for the token pool. Required on input when multiple token connectors are configured | `string` | -| `message` | The UUID of the broadcast message used to inform the network to index this pool | [`UUID`](simpletypes#uuid) | -| `state` | The current state of the token pool | `FFEnum`:
`"pending"`
`"confirmed"` | +| `message` | The UUID of the broadcast message used to inform the network about this pool | [`UUID`](simpletypes#uuid) | +| `active` | Indicates whether the pool has been successfully activated with the token connector | `bool` | | `created` | The creation time of the pool | [`FFTime`](simpletypes#fftime) | | `config` | Input only field, with token connector specific configuration of the pool, such as an existing Ethereum address and block number to used to index the pool. See your chosen token connector documentation for details | [`JSONObject`](simpletypes#jsonobject) | | `info` | Token connector specific information about the pool. See your chosen token connector documentation for details | [`JSONObject`](simpletypes#jsonobject) | @@ -70,6 +72,7 @@ nav_order: 11 | `interface` | A reference to an existing FFI, containing pre-registered type information for the token contract | [`FFIReference`](#ffireference) | | `interfaceFormat` | The interface encoding format supported by the connector for this token pool | `FFEnum`:
`"abi"`
`"ffi"` | | `methods` | The method definitions resolved by the token connector to be used by each token operation | [`JSONAny`](simpletypes#jsonany) | +| `published` | Indicates if the token pool is published to other members of the multiparty network | `bool` | ## TransactionRef diff --git a/docs/reference/types/verifier.md b/docs/reference/types/verifier.md index 4881d2c5ca..caebcfdd0c 100644 --- a/docs/reference/types/verifier.md +++ b/docs/reference/types/verifier.md @@ -39,7 +39,7 @@ nav_order: 15 | `hash` | Hash used as a globally consistent identifier for this namespace + type + value combination on every node in the network | `Bytes32` | | `identity` | The UUID of the parent identity that has claimed this verifier | [`UUID`](simpletypes#uuid) | | `namespace` | The namespace of the verifier | `string` | -| `type` | The type of the verifier | `FFEnum`:
`"ethereum_address"`
`"fabric_msp_id"`
`"dx_peer_id"` | +| `type` | The type of the verifier | `FFEnum`:
`"ethereum_address"`
`"tezos_address"`
`"fabric_msp_id"`
`"dx_peer_id"` |
 | `value` | The verifier string, such as an Ethereum address, or Fabric MSP identifier | `string` |
 | `created` | The time this verifier was created on this node | [`FFTime`](simpletypes#fftime) |
diff --git a/docs/reference/types/wsstart.md b/docs/reference/types/wsstart.md
index 2133a8cdcf..3c606a79b9 100644
--- a/docs/reference/types/wsstart.md
+++ b/docs/reference/types/wsstart.md
@@ -96,6 +96,8 @@ nav_order: 23
 | `firstEvent` | Whether your application would like to receive events from the 'oldest' event emitted by your FireFly node (from the beginning of time), or the 'newest' event (from now), or a specific event sequence. Default is 'newest' | `SubOptsFirstEvent` |
 | `readAhead` | The number of events to stream ahead to your application, while waiting for confirmation of consumption of those events. At least once delivery semantics are used in FireFly, so if your application crashes/reconnects this is the maximum number of events you would expect to be redelivered after it restarts | `uint16` |
 | `withData` | Whether message events delivered over the subscription, should be packaged with the full data of those messages in-line as part of the event JSON payload. Or if the application should make separate REST calls to download that data. May not be supported on some transports. | `bool` |
+| `batch` | Events are delivered in batches in an ordered array. The batch size is capped to the readAhead limit. The event payload is always an array even if there is a single event in the batch. Commonly used with Webhooks to allow events to be delivered and acknowledged in batches. | `bool` |
+| `batchTimeout` | When batching is enabled, an optional timeout after which events are sent even if the batch has not filled. | `string` |
 | `fastack` | Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations | `bool` |
 | `url` | Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config | `string` |
 | `method` | Webhooks only: HTTP method to invoke. Default=POST | `string` |
@@ -105,7 +107,10 @@ nav_order: 23
 | `replytx` | Webhooks only: The transaction type to set on the reply message | `string` |
 | `headers` | Webhooks only: Static headers to set on the webhook request | `` |
 | `query` | Webhooks only: Static query params to set on the webhook request | `` |
+| `tlsConfigName` | The name of an existing TLS configuration, associated with the namespace, to use | `string` |
 | `input` | Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. Only applies if withData=true | [`WebhookInputOptions`](#webhookinputoptions) |
+| `retry` | Webhooks only: a set of options for retrying the webhook call | [`WebhookRetryOptions`](#webhookretryoptions) |
+| `httpOptions` | Webhooks only: a set of HTTP options for the webhook call | [`WebhookHTTPOptions`](#webhookhttpoptions) |

 ## WebhookInputOptions

@@ -118,4 +123,27 @@ nav_order: 23
 | `replytx` | A top-level property of the first data input, to use to dynamically set whether to pin the response (so the requester can choose) | `string` |

+## WebhookRetryOptions
+
+| Field Name | Description | Type |
+|------------|-------------|------|
+| `enabled` | Enables retry on HTTP calls, defaults to false | `bool` |
+| `count` | Number of times to retry the webhook call in case of failure | `int` |
+| `initialDelay` | Initial delay between retries of the webhook call | `string` |
+| `maxDelay` | Max delay between retries of the webhook call | `string` |
+
+## WebhookHTTPOptions
+
+| Field Name | Description | Type |
+|------------|-------------|------|
+| `proxyURL` | HTTP proxy URL to use for outbound requests to the webhook | `string` |
+| `tlsHandshakeTimeout` | The max duration to hold a TLS handshake alive | `string` |
+| `requestTimeout` | The max duration to wait for the webhook HTTP request to complete | `string` |
+| `maxIdleConns` | The max number of idle connections to hold pooled | `int` |
+| `idleTimeout` | The max duration to hold an HTTP keepalive connection between calls | `string` |
+| `connectionTimeout` | The maximum amount of time that a connection is allowed to remain with no data transmitted. | `string` |
+| `expectContinueTimeout` | See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) | `string` |
+
diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index 0ff33d04c0..ff978ec5b8 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -1,6 +1,6 @@ components: {} info: - title: FireFly + title: Hyperledger FireFly version: "1.0" openapi: 3.0.2 paths: @@ -31,6 +31,16 @@ paths: name: name schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: networkname + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: published + schema: + type: string - description: Sort field.
For multi-field sort use comma separated values (or multiple query values) with '-' prefix for descending in: query @@ -108,6 +118,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -137,6 +155,12 @@ paths: schema: example: "true" type: string + - description: When true the definition will be published to all other members + of the multiparty network + in: query + name: publish + schema: + type: string - description: Server-side request timeout (milliseconds, or set a custom suffix like 10s) in: header @@ -172,6 +196,10 @@ paths: name: description: The name that is used in the URL to access the API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string type: object responses: "200": @@ -214,6 +242,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -268,6 +304,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -287,6 +331,32 @@ paths: tags: - Default Namespace /apis/{apiName}: + delete: + description: Delete a contract API + operationId: deleteContractAPI + parameters: + - description: The name of the contract API + in: path + name: apiName + required: true + schema: + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + responses: + "204": + content: + application/json: {} + description: Success + default: + description: "" + tags: + - Default Namespace get: description: Gets information about a contract API, including the URLs for the OpenAPI Spec and Swagger UI for the API @@ -346,6 +416,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -595,6 +673,14 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -1357,11 +1443,10 @@ paths: description: "" tags: - Default Namespace - /apis/{apiName}/query/{methodPath}: + /apis/{apiName}/publish: post: - 
description: Queries a method on a smart contract API. Performs a read-only - query. - operationId: postContractAPIQuery + description: Publish a contract API to all other members of the multiparty network + operationId: postContractAPIPublish parameters: - description: The name of the contract API in: path @@ -1369,85 +1454,10 @@ paths: required: true schema: type: string - - description: The name or uniquely generated path name of a method on a smart - contract - in: path - name: methodPath - required: true - schema: - type: string - - description: Server-side request timeout (milliseconds, or set a custom suffix - like 10s) - in: header - name: Request-Timeout - schema: - default: 2m0s - type: string - requestBody: - content: - application/json: - schema: - properties: - idempotencyKey: - description: An optional identifier to allow idempotent submission - of requests. Stored on the transaction uniquely within a namespace - type: string - input: - additionalProperties: - description: A map of named inputs. The name and type of each - input must be compatible with the FFI description of the method, - so that FireFly knows how to serialize it to the blockchain - via the connector - description: A map of named inputs. The name and type of each input - must be compatible with the FFI description of the method, so - that FireFly knows how to serialize it to the blockchain via the - connector - type: object - key: - description: The blockchain signing key that will sign the invocation. - Defaults to the first signing key of the organization that operates - the node - type: string - location: - description: A blockchain specific contract identifier. For example - an Ethereum contract address, or a Fabric chaincode name and channel - options: - additionalProperties: - description: A map of named inputs that will be passed through - to the blockchain connector - description: A map of named inputs that will be passed through to - the blockchain connector - type: object - type: object - responses: - "200": - content: - application/json: - schema: - additionalProperties: {} - type: object - description: Success - default: - description: "" - tags: - - Default Namespace - /apis/{id}: - put: - description: The ID of the contract API - operationId: putContractAPI - parameters: - - description: The name of the contract API - in: path - name: id - required: true - schema: - example: id - type: string - description: When true the HTTP request blocks until the message is confirmed in: query name: confirm schema: - example: "true" type: string - description: Server-side request timeout (milliseconds, or set a custom suffix like 10s) @@ -1461,28 +1471,9 @@ paths: application/json: schema: properties: - interface: - description: Reference to the FireFly Interface definition associated - with the contract API - properties: - id: - description: The UUID of the FireFly interface - format: uuid - type: string - name: - description: The name of the FireFly interface - type: string - version: - description: The version of the FireFly interface - type: string - type: object - location: - description: If this API is tied to an individual instance of a - smart contract, this field can include a blockchain specific contract - identifier. 
For example an Ethereum contract address, or a Fabric - chaincode name and channel - name: - description: The name that is used in the URL to access the API + networkName: + description: An optional name to be used for publishing this definition + to the multiparty network, which may differ from the local name type: string type: object responses: @@ -1491,53 +1482,225 @@ paths: application/json: schema: properties: + description: + description: A description of the smart contract this FFI represents + type: string + errors: + description: An array of smart contract error definitions + items: + description: An array of smart contract error definitions + properties: + description: + description: A description of the smart contract error + type: string + id: + description: The UUID of the FFI error definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this error is part of + format: uuid + type: string + name: + description: The name of the error + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of error parameter/argument definitions + items: + description: An array of error parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this error within + the FFI for use on URL paths + type: string + signature: + description: The stringified signature of the error, as + computed by the blockchain plugin + type: string + type: object + type: array + events: + description: An array of smart contract event definitions + items: + description: An array of smart contract event definitions + properties: + description: + description: A description of the smart contract event + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this event from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this event from the original smart contract. Used by the + blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI event definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this event is part of + format: uuid + type: string + name: + description: The name of the event + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of event parameter/argument definitions + items: + description: An array of event parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. 
+ Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this event within + the FFI for use on URL paths. Supports contracts that + have multiple event overrides with the same name + type: string + signature: + description: The stringified signature of the event, as + computed by the blockchain plugin + type: string + type: object + type: array id: - description: The UUID of the contract API + description: The UUID of the FireFly interface (FFI) smart contract + definition format: uuid type: string - interface: - description: Reference to the FireFly Interface definition associated - with the contract API - properties: - id: - description: The UUID of the FireFly interface - format: uuid - type: string - name: - description: The name of the FireFly interface - type: string - version: - description: The version of the FireFly interface - type: string - type: object - location: - description: If this API is tied to an individual instance of - a smart contract, this field can include a blockchain specific - contract identifier. For example an Ethereum contract address, - or a Fabric chaincode name and channel message: description: The UUID of the broadcast message that was used to - publish this API to the network + publish this FFI to the network format: uuid type: string + methods: + description: An array of smart contract method definitions + items: + description: An array of smart contract method definitions + properties: + description: + description: A description of the smart contract method + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI method definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this method is part of + format: uuid + type: string + name: + description: The name of the method + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of method parameter/argument definitions + items: + description: An array of method parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this method within + the FFI for use on URL paths. Supports contracts that + have multiple method overrides with the same name + type: string + returns: + description: An array of method return definitions + items: + description: An array of method return definitions + properties: + name: + description: The name of the parameter. 
Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + type: object + type: array name: - description: The name that is used in the URL to access the API + description: The name of the FFI - usually matching the smart + contract name type: string namespace: - description: The namespace of the contract API + description: The namespace of the FFI + type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean + version: + description: A version for the FFI - use of semantic versioning + such as 'v1.0.1' is encouraged type: string - urls: - description: The URLs to use to access the API - properties: - openapi: - description: The URL to download the OpenAPI v3 (Swagger) - description for the API generated in JSON or YAML format - type: string - ui: - description: The URL to use in a web browser to access the - SwaggerUI explorer/exerciser for the API - type: string - type: object type: object description: Success "202": @@ -1545,120 +1708,553 @@ paths: application/json: schema: properties: - id: - description: The UUID of the contract API - format: uuid - type: string - interface: - description: Reference to the FireFly Interface definition associated - with the contract API - properties: - id: - description: The UUID of the FireFly interface - format: uuid - type: string - name: - description: The name of the FireFly interface - type: string - version: - description: The version of the FireFly interface - type: string - type: object - location: - description: If this API is tied to an individual instance of - a smart contract, this field can include a blockchain specific - contract identifier. For example an Ethereum contract address, - or a Fabric chaincode name and channel - message: - description: The UUID of the broadcast message that was used to - publish this API to the network - format: uuid - type: string - name: - description: The name that is used in the URL to access the API - type: string - namespace: - description: The namespace of the contract API + description: + description: A description of the smart contract this FFI represents type: string - urls: - description: The URLs to use to access the API - properties: - openapi: - description: The URL to download the OpenAPI v3 (Swagger) - description for the API generated in JSON or YAML format - type: string - ui: - description: The URL to use in a web browser to access the - SwaggerUI explorer/exerciser for the API - type: string - type: object - type: object - description: Success - default: - description: "" - tags: - - Default Namespace - /batches: - get: - description: Gets a list of message batches - operationId: getBatches - parameters: - - description: Server-side request timeout (milliseconds, or set a custom suffix - like 10s) - in: header - name: Request-Timeout - schema: - default: 2m0s - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' - in: query - name: author - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: confirmed - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: created - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: group - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: hash - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: id - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: key - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: node - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: payloadref - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: tx.id - schema: + errors: + description: An array of smart contract error definitions + items: + description: An array of smart contract error definitions + properties: + description: + description: A description of the smart contract error + type: string + id: + description: The UUID of the FFI error definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this error is part of + format: uuid + type: string + name: + description: The name of the error + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of error parameter/argument definitions + items: + description: An array of error parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this error within + the FFI for use on URL paths + type: string + signature: + description: The stringified signature of the error, as + computed by the blockchain plugin + type: string + type: object + type: array + events: + description: An array of smart contract event definitions + items: + description: An array of smart contract event definitions + properties: + description: + description: A description of the smart contract event + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this event from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this event from the original smart contract. Used by the + blockchain plugin and for documentation generation. 
+ type: object + id: + description: The UUID of the FFI event definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this event is part of + format: uuid + type: string + name: + description: The name of the event + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of event parameter/argument definitions + items: + description: An array of event parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this event within + the FFI for use on URL paths. Supports contracts that + have multiple event overrides with the same name + type: string + signature: + description: The stringified signature of the event, as + computed by the blockchain plugin + type: string + type: object + type: array + id: + description: The UUID of the FireFly interface (FFI) smart contract + definition + format: uuid + type: string + message: + description: The UUID of the broadcast message that was used to + publish this FFI to the network + format: uuid + type: string + methods: + description: An array of smart contract method definitions + items: + description: An array of smart contract method definitions + properties: + description: + description: A description of the smart contract method + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI method definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this method is part of + format: uuid + type: string + name: + description: The name of the method + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of method parameter/argument definitions + items: + description: An array of method parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this method within + the FFI for use on URL paths. 
Supports contracts that + have multiple method overrides with the same name + type: string + returns: + description: An array of method return definitions + items: + description: An array of method return definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + type: object + type: array + name: + description: The name of the FFI - usually matching the smart + contract name + type: string + namespace: + description: The namespace of the FFI + type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean + version: + description: A version for the FFI - use of semantic versioning + such as 'v1.0.1' is encouraged + type: string + type: object + description: Success + default: + description: "" + tags: + - Default Namespace + /apis/{apiName}/query/{methodPath}: + post: + description: Queries a method on a smart contract API. Performs a read-only + query. + operationId: postContractAPIQuery + parameters: + - description: The name of the contract API + in: path + name: apiName + required: true + schema: + type: string + - description: The name or uniquely generated path name of a method on a smart + contract + in: path + name: methodPath + required: true + schema: + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + requestBody: + content: + application/json: + schema: + properties: + idempotencyKey: + description: An optional identifier to allow idempotent submission + of requests. Stored on the transaction uniquely within a namespace + type: string + input: + additionalProperties: + description: A map of named inputs. The name and type of each + input must be compatible with the FFI description of the method, + so that FireFly knows how to serialize it to the blockchain + via the connector + description: A map of named inputs. The name and type of each input + must be compatible with the FFI description of the method, so + that FireFly knows how to serialize it to the blockchain via the + connector + type: object + key: + description: The blockchain signing key that will sign the invocation. + Defaults to the first signing key of the organization that operates + the node + type: string + location: + description: A blockchain specific contract identifier. 
For example + an Ethereum contract address, or a Fabric chaincode name and channel + options: + additionalProperties: + description: A map of named inputs that will be passed through + to the blockchain connector + description: A map of named inputs that will be passed through to + the blockchain connector + type: object + type: object + responses: + "200": + content: + application/json: + schema: + additionalProperties: {} + type: object + description: Success + default: + description: "" + tags: + - Default Namespace + /apis/{id}: + put: + description: The ID of the contract API + operationId: putContractAPI + parameters: + - description: The name of the contract API + in: path + name: id + required: true + schema: + example: id + type: string + - description: When true the HTTP request blocks until the message is confirmed + in: query + name: confirm + schema: + example: "true" + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + requestBody: + content: + application/json: + schema: + properties: + interface: + description: Reference to the FireFly Interface definition associated + with the contract API + properties: + id: + description: The UUID of the FireFly interface + format: uuid + type: string + name: + description: The name of the FireFly interface + type: string + version: + description: The version of the FireFly interface + type: string + type: object + location: + description: If this API is tied to an individual instance of a + smart contract, this field can include a blockchain specific contract + identifier. For example an Ethereum contract address, or a Fabric + chaincode name and channel + name: + description: The name that is used in the URL to access the API + type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + type: object + responses: + "200": + content: + application/json: + schema: + properties: + id: + description: The UUID of the contract API + format: uuid + type: string + interface: + description: Reference to the FireFly Interface definition associated + with the contract API + properties: + id: + description: The UUID of the FireFly interface + format: uuid + type: string + name: + description: The name of the FireFly interface + type: string + version: + description: The version of the FireFly interface + type: string + type: object + location: + description: If this API is tied to an individual instance of + a smart contract, this field can include a blockchain specific + contract identifier. 
For example an Ethereum contract address, + or a Fabric chaincode name and channel + message: + description: The UUID of the broadcast message that was used to + publish this API to the network + format: uuid + type: string + name: + description: The name that is used in the URL to access the API + type: string + namespace: + description: The namespace of the contract API + type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean + urls: + description: The URLs to use to access the API + properties: + openapi: + description: The URL to download the OpenAPI v3 (Swagger) + description for the API generated in JSON or YAML format + type: string + ui: + description: The URL to use in a web browser to access the + SwaggerUI explorer/exerciser for the API + type: string + type: object + type: object + description: Success + "202": + content: + application/json: + schema: + properties: + id: + description: The UUID of the contract API + format: uuid + type: string + interface: + description: Reference to the FireFly Interface definition associated + with the contract API + properties: + id: + description: The UUID of the FireFly interface + format: uuid + type: string + name: + description: The name of the FireFly interface + type: string + version: + description: The version of the FireFly interface + type: string + type: object + location: + description: If this API is tied to an individual instance of + a smart contract, this field can include a blockchain specific + contract identifier. For example an Ethereum contract address, + or a Fabric chaincode name and channel + message: + description: The UUID of the broadcast message that was used to + publish this API to the network + format: uuid + type: string + name: + description: The name that is used in the URL to access the API + type: string + namespace: + description: The namespace of the contract API + type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean + urls: + description: The URLs to use to access the API + properties: + openapi: + description: The URL to download the OpenAPI v3 (Swagger) + description for the API generated in JSON or YAML format + type: string + ui: + description: The URL to use in a web browser to access the + SwaggerUI explorer/exerciser for the API + type: string + type: object + type: object + description: Success + default: + description: "" + tags: + - Default Namespace + /batches: + get: + description: Gets a list of message batches + operationId: getBatches + parameters: + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: author + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: confirmed + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: created + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' + in: query + name: group + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: hash + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: id + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: key + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: node + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: payloadref + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: tx.id + schema: type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query @@ -2417,6 +3013,16 @@ paths: name: name schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: networkname + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: published + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query name: version @@ -2678,6 +3284,14 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -2699,6 +3313,12 @@ paths: schema: example: "true" type: string + - description: When true the definition will be published to all other members + of the multiparty network + in: query + name: publish + schema: + type: string - description: Server-side request timeout (milliseconds, or set a custom suffix like 10s) in: header @@ -2847,6 +3467,10 @@ paths: description: The name of the FFI - usually matching the smart contract name type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -3065,6 +3689,14 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -3076,6 +3708,32 @@ paths: tags: - Default Namespace /contracts/interfaces/{interfaceId}: + delete: + description: Delete a contract interface + operationId: deleteContractInterface + parameters: + - description: The ID of the contract interface + in: path + name: interfaceId + required: true + schema: + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + responses: + "204": + content: + application/json: {} + description: Success + default: + description: "" + tags: + - Default Namespace get: description: Gets a contract interface 
by its ID operationId: getContractInterface @@ -3313,6 +3971,276 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean + version: + description: A version for the FFI - use of semantic versioning + such as 'v1.0.1' is encouraged + type: string + type: object + description: Success + default: + description: "" + tags: + - Default Namespace + /contracts/interfaces/{name}/{version}: + get: + description: Gets a contract interface by its name and version + operationId: getContractInterfaceByNameAndVersion + parameters: + - description: The name of the contract interface + in: path + name: name + required: true + schema: + type: string + - description: The version of the contract interface + in: path + name: version + required: true + schema: + type: string + - description: When set, the API will return the full FireFly Interface document + including all methods, events, and parameters + in: query + name: fetchchildren + schema: + example: "true" + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + responses: + "200": + content: + application/json: + schema: + properties: + description: + description: A description of the smart contract this FFI represents + type: string + errors: + description: An array of smart contract error definitions + items: + description: An array of smart contract error definitions + properties: + description: + description: A description of the smart contract error + type: string + id: + description: The UUID of the FFI error definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this error is part of + format: uuid + type: string + name: + description: The name of the error + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of error parameter/argument definitions + items: + description: An array of error parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this error within + the FFI for use on URL paths + type: string + signature: + description: The stringified signature of the error, as + computed by the blockchain plugin + type: string + type: object + type: array + events: + description: An array of smart contract event definitions + items: + description: An array of smart contract event definitions + properties: + description: + description: A description of the smart contract event + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this event from the original smart contract. Used by + the blockchain plugin and for documentation generation. 
+ description: Additional blockchain specific fields about + this event from the original smart contract. Used by the + blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI event definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this event is part of + format: uuid + type: string + name: + description: The name of the event + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of event parameter/argument definitions + items: + description: An array of event parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this event within + the FFI for use on URL paths. Supports contracts that + have multiple event overrides with the same name + type: string + signature: + description: The stringified signature of the event, as + computed by the blockchain plugin + type: string + type: object + type: array + id: + description: The UUID of the FireFly interface (FFI) smart contract + definition + format: uuid + type: string + message: + description: The UUID of the broadcast message that was used to + publish this FFI to the network + format: uuid + type: string + methods: + description: An array of smart contract method definitions + items: + description: An array of smart contract method definitions + properties: + description: + description: A description of the smart contract method + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI method definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this method is part of + format: uuid + type: string + name: + description: The name of the method + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of method parameter/argument definitions + items: + description: An array of method parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this method within + the FFI for use on URL paths. 
Supports contracts that + have multiple method overrides with the same name + type: string + returns: + description: An array of method return definitions + items: + description: An array of method return definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + type: object + type: array + name: + description: The name of the FFI - usually matching the smart + contract name + type: string + namespace: + description: The namespace of the FFI + type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -3323,10 +4251,11 @@ paths: description: "" tags: - Default Namespace - /contracts/interfaces/{name}/{version}: - get: - description: Gets a contract interface by its name and version - operationId: getContractInterfaceByNameAndVersion + /contracts/interfaces/{name}/{version}/publish: + post: + description: Publish a contract interface to all other members of the multiparty + network + operationId: postContractInterfacePublish parameters: - description: The name of the contract interface in: path @@ -3340,12 +4269,10 @@ paths: required: true schema: type: string - - description: When set, the API will return the full FireFly Interface document - including all methods, events, and parameters + - description: When true the HTTP request blocks until the message is confirmed in: query - name: fetchchildren + name: confirm schema: - example: "true" type: string - description: Server-side request timeout (milliseconds, or set a custom suffix like 10s) @@ -3354,6 +4281,16 @@ paths: schema: default: 2m0s type: string + requestBody: + content: + application/json: + schema: + properties: + networkName: + description: An optional name to be used for publishing this definition + to the multiparty network, which may differ from the local name + type: string + type: object responses: "200": content: @@ -3567,6 +4504,240 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean + version: + description: A version for the FFI - use of semantic versioning + such as 'v1.0.1' is encouraged + type: string + type: object + description: Success + "202": + content: + application/json: + schema: + properties: + description: + description: A description of the smart contract this FFI represents + type: string + errors: + description: An array of smart contract error definitions + items: + description: An array of smart contract error definitions + properties: + description: + description: A description of the smart contract error + type: string + id: + description: The UUID of the FFI error definition + format: uuid + type: string + 
interface: + description: The UUID of the FFI smart contract definition + that this error is part of + format: uuid + type: string + name: + description: The name of the error + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of error parameter/argument definitions + items: + description: An array of error parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this error within + the FFI for use on URL paths + type: string + signature: + description: The stringified signature of the error, as + computed by the blockchain plugin + type: string + type: object + type: array + events: + description: An array of smart contract event definitions + items: + description: An array of smart contract event definitions + properties: + description: + description: A description of the smart contract event + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this event from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this event from the original smart contract. Used by the + blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI event definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this event is part of + format: uuid + type: string + name: + description: The name of the event + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of event parameter/argument definitions + items: + description: An array of event parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this event within + the FFI for use on URL paths. 
Supports contracts that + have multiple event overrides with the same name + type: string + signature: + description: The stringified signature of the event, as + computed by the blockchain plugin + type: string + type: object + type: array + id: + description: The UUID of the FireFly interface (FFI) smart contract + definition + format: uuid + type: string + message: + description: The UUID of the broadcast message that was used to + publish this FFI to the network + format: uuid + type: string + methods: + description: An array of smart contract method definitions + items: + description: An array of smart contract method definitions + properties: + description: + description: A description of the smart contract method + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI method definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this method is part of + format: uuid + type: string + name: + description: The name of the method + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of method parameter/argument definitions + items: + description: An array of method parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this method within + the FFI for use on URL paths. Supports contracts that + have multiple method overrides with the same name + type: string + returns: + description: An array of method return definitions + items: + description: An array of method return definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. 
See the documentation for more detail + type: object + type: array + type: object + type: array + name: + description: The name of the FFI - usually matching the smart + contract name + type: string + namespace: + description: The namespace of the FFI + type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -3828,6 +4999,14 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -5562,6 +6741,11 @@ paths: name: pins schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: rejectreason + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query name: sequence @@ -5839,6 +7023,11 @@ paths: name: pins schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: rejectreason + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query name: sequence @@ -6063,6 +7252,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -6163,6 +7356,11 @@ paths: name: pins schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: rejectreason + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query name: sequence @@ -7444,6 +8642,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -7775,6 +8974,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -8270,6 +9470,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -8361,6 +9562,11 @@ paths: name: pins schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: rejectreason + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' in: query name: sequence @@ -8587,6 +9793,10 @@ paths: is assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on + the rejection reason + type: string state: description: The current state of the message enum: @@ -8862,6 +10072,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -9498,6 +10712,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -9653,6 +10871,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -9979,6 +11201,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -10140,6 +11366,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -10547,6 +11777,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -10701,6 +11935,16 @@ paths: name: name schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: networkname + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: published + schema: + type: string - description: Sort field. 
For multi-field sort use comma separated values (or multiple query values) with '-' prefix for descending in: query @@ -10778,6 +12022,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -10814,6 +12066,12 @@ paths: schema: example: "true" type: string + - description: When true the definition will be published to all other members + of the multiparty network + in: query + name: publish + schema: + type: string - description: Server-side request timeout (milliseconds, or set a custom suffix like 10s) in: header @@ -10849,6 +12107,10 @@ paths: name: description: The name that is used in the URL to access the API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string type: object responses: "200": @@ -10891,6 +12153,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -10945,6 +12215,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -10964,6 +12242,39 @@ paths: tags: - Non-Default Namespace /namespaces/{ns}/apis/{apiName}: + delete: + description: Delete a contract API + operationId: deleteContractAPINamespace + parameters: + - description: The name of the contract API + in: path + name: apiName + required: true + schema: + type: string + - description: The namespace which scopes this request + in: path + name: ns + required: true + schema: + example: default + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + responses: + "204": + content: + application/json: {} + description: Success + default: + description: "" + tags: + - Non-Default Namespace get: description: Gets information about a contract API, including the URLs for the OpenAPI Spec and Swagger UI for the API @@ -11030,6 +12341,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -11286,6 +12605,14 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI 
- use of semantic versioning such as 'v1.0.1' is encouraged @@ -12222,6 +13549,503 @@ paths: description: "" tags: - Non-Default Namespace + /namespaces/{ns}/apis/{apiName}/publish: + post: + description: Publish a contract API to all other members of the multiparty network + operationId: postContractAPIPublishNamespace + parameters: + - description: The name of the contract API + in: path + name: apiName + required: true + schema: + type: string + - description: The namespace which scopes this request + in: path + name: ns + required: true + schema: + example: default + type: string + - description: When true the HTTP request blocks until the message is confirmed + in: query + name: confirm + schema: + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + requestBody: + content: + application/json: + schema: + properties: + networkName: + description: An optional name to be used for publishing this definition + to the multiparty network, which may differ from the local name + type: string + type: object + responses: + "200": + content: + application/json: + schema: + properties: + description: + description: A description of the smart contract this FFI represents + type: string + errors: + description: An array of smart contract error definitions + items: + description: An array of smart contract error definitions + properties: + description: + description: A description of the smart contract error + type: string + id: + description: The UUID of the FFI error definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this error is part of + format: uuid + type: string + name: + description: The name of the error + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of error parameter/argument definitions + items: + description: An array of error parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this error within + the FFI for use on URL paths + type: string + signature: + description: The stringified signature of the error, as + computed by the blockchain plugin + type: string + type: object + type: array + events: + description: An array of smart contract event definitions + items: + description: An array of smart contract event definitions + properties: + description: + description: A description of the smart contract event + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this event from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this event from the original smart contract. Used by the + blockchain plugin and for documentation generation. 
+ type: object + id: + description: The UUID of the FFI event definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this event is part of + format: uuid + type: string + name: + description: The name of the event + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of event parameter/argument definitions + items: + description: An array of event parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this event within + the FFI for use on URL paths. Supports contracts that + have multiple event overrides with the same name + type: string + signature: + description: The stringified signature of the event, as + computed by the blockchain plugin + type: string + type: object + type: array + id: + description: The UUID of the FireFly interface (FFI) smart contract + definition + format: uuid + type: string + message: + description: The UUID of the broadcast message that was used to + publish this FFI to the network + format: uuid + type: string + methods: + description: An array of smart contract method definitions + items: + description: An array of smart contract method definitions + properties: + description: + description: A description of the smart contract method + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI method definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this method is part of + format: uuid + type: string + name: + description: The name of the method + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of method parameter/argument definitions + items: + description: An array of method parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this method within + the FFI for use on URL paths. 
Supports contracts that + have multiple method overrides with the same name + type: string + returns: + description: An array of method return definitions + items: + description: An array of method return definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + type: object + type: array + name: + description: The name of the FFI - usually matching the smart + contract name + type: string + namespace: + description: The namespace of the FFI + type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean + version: + description: A version for the FFI - use of semantic versioning + such as 'v1.0.1' is encouraged + type: string + type: object + description: Success + "202": + content: + application/json: + schema: + properties: + description: + description: A description of the smart contract this FFI represents + type: string + errors: + description: An array of smart contract error definitions + items: + description: An array of smart contract error definitions + properties: + description: + description: A description of the smart contract error + type: string + id: + description: The UUID of the FFI error definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this error is part of + format: uuid + type: string + name: + description: The name of the error + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of error parameter/argument definitions + items: + description: An array of error parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this error within + the FFI for use on URL paths + type: string + signature: + description: The stringified signature of the error, as + computed by the blockchain plugin + type: string + type: object + type: array + events: + description: An array of smart contract event definitions + items: + description: An array of smart contract event definitions + properties: + description: + description: A description of the smart contract event + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this event from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this event from the original smart contract. 
Used by the + blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI event definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this event is part of + format: uuid + type: string + name: + description: The name of the event + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of event parameter/argument definitions + items: + description: An array of event parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this event within + the FFI for use on URL paths. Supports contracts that + have multiple event overrides with the same name + type: string + signature: + description: The stringified signature of the event, as + computed by the blockchain plugin + type: string + type: object + type: array + id: + description: The UUID of the FireFly interface (FFI) smart contract + definition + format: uuid + type: string + message: + description: The UUID of the broadcast message that was used to + publish this FFI to the network + format: uuid + type: string + methods: + description: An array of smart contract method definitions + items: + description: An array of smart contract method definitions + properties: + description: + description: A description of the smart contract method + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI method definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this method is part of + format: uuid + type: string + name: + description: The name of the method + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of method parameter/argument definitions + items: + description: An array of method parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this method within + the FFI for use on URL paths. 
Supports contracts that + have multiple method overrides with the same name + type: string + returns: + description: An array of method return definitions + items: + description: An array of method return definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + type: object + type: array + name: + description: The name of the FFI - usually matching the smart + contract name + type: string + namespace: + description: The namespace of the FFI + type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean + version: + description: A version for the FFI - use of semantic versioning + such as 'v1.0.1' is encouraged + type: string + type: object + description: Success + default: + description: "" + tags: + - Non-Default Namespace /namespaces/{ns}/apis/{apiName}/query/{methodPath}: post: description: Queries a method on a smart contract API. Performs a read-only @@ -12609,6 +14433,10 @@ paths: name: description: The name that is used in the URL to access the API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string type: object responses: "200": @@ -12651,6 +14479,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -12705,6 +14541,14 @@ paths: namespace: description: The namespace of the contract API type: string + networkName: + description: The published name of the API within the multiparty + network + type: string + published: + description: Indicates if the API is published to other members + of the multiparty network + type: boolean urls: description: The URLs to use to access the API properties: @@ -13591,6 +15435,16 @@ paths: name: name schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: networkname + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: published + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' in: query name: version @@ -13852,6 +15706,14 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -13880,6 +15742,12 @@ paths: schema: example: "true" type: string + - description: When true the definition will be published to all other members + of the multiparty network + in: query + name: publish + schema: + type: string - description: Server-side request timeout (milliseconds, or set a custom suffix like 10s) in: header @@ -14028,6 +15896,10 @@ paths: description: The name of the FFI - usually matching the smart contract name type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -14246,6 +16118,14 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -14257,6 +16137,39 @@ paths: tags: - Non-Default Namespace /namespaces/{ns}/contracts/interfaces/{interfaceId}: + delete: + description: Delete a contract interface + operationId: deleteContractInterfaceNamespace + parameters: + - description: The ID of the contract interface + in: path + name: interfaceId + required: true + schema: + type: string + - description: The namespace which scopes this request + in: path + name: ns + required: true + schema: + example: default + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + responses: + "204": + content: + application/json: {} + description: Success + default: + description: "" + tags: + - Non-Default Namespace get: description: Gets a contract interface by its ID operationId: getContractInterfaceNamespace @@ -14501,6 +16414,283 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean + version: + description: A version for the FFI - use of semantic versioning + such as 'v1.0.1' is encouraged + type: string + type: object + description: Success + default: + description: "" + tags: + - Non-Default Namespace + /namespaces/{ns}/contracts/interfaces/{name}/{version}: + get: + description: Gets a contract interface by its name and version + operationId: getContractInterfaceByNameAndVersionNamespace + parameters: + - description: The name of the contract interface + in: path + name: name + required: true + schema: + type: string + - description: The version of the contract interface + in: path + name: version + required: true + schema: + type: string + - description: The namespace which scopes this request + in: path + name: ns + required: true + 
schema: + example: default + type: string + - description: When set, the API will return the full FireFly Interface document + including all methods, events, and parameters + in: query + name: fetchchildren + schema: + example: "true" + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + responses: + "200": + content: + application/json: + schema: + properties: + description: + description: A description of the smart contract this FFI represents + type: string + errors: + description: An array of smart contract error definitions + items: + description: An array of smart contract error definitions + properties: + description: + description: A description of the smart contract error + type: string + id: + description: The UUID of the FFI error definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this error is part of + format: uuid + type: string + name: + description: The name of the error + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of error parameter/argument definitions + items: + description: An array of error parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this error within + the FFI for use on URL paths + type: string + signature: + description: The stringified signature of the error, as + computed by the blockchain plugin + type: string + type: object + type: array + events: + description: An array of smart contract event definitions + items: + description: An array of smart contract event definitions + properties: + description: + description: A description of the smart contract event + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this event from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this event from the original smart contract. Used by the + blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI event definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this event is part of + format: uuid + type: string + name: + description: The name of the event + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of event parameter/argument definitions + items: + description: An array of event parameter/argument definitions + properties: + name: + description: The name of the parameter. 
Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this event within + the FFI for use on URL paths. Supports contracts that + have multiple event overrides with the same name + type: string + signature: + description: The stringified signature of the event, as + computed by the blockchain plugin + type: string + type: object + type: array + id: + description: The UUID of the FireFly interface (FFI) smart contract + definition + format: uuid + type: string + message: + description: The UUID of the broadcast message that was used to + publish this FFI to the network + format: uuid + type: string + methods: + description: An array of smart contract method definitions + items: + description: An array of smart contract method definitions + properties: + description: + description: A description of the smart contract method + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI method definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this method is part of + format: uuid + type: string + name: + description: The name of the method + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of method parameter/argument definitions + items: + description: An array of method parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this method within + the FFI for use on URL paths. Supports contracts that + have multiple method overrides with the same name + type: string + returns: + description: An array of method return definitions + items: + description: An array of method return definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. 
See the documentation for more detail + type: object + type: array + type: object + type: array + name: + description: The name of the FFI - usually matching the smart + contract name + type: string + namespace: + description: The namespace of the FFI + type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -14511,10 +16701,11 @@ paths: description: "" tags: - Non-Default Namespace - /namespaces/{ns}/contracts/interfaces/{name}/{version}: - get: - description: Gets a contract interface by its name and version - operationId: getContractInterfaceByNameAndVersionNamespace + /namespaces/{ns}/contracts/interfaces/{name}/{version}/publish: + post: + description: Publish a contract interface to all other members of the multiparty + network + operationId: postContractInterfacePublishNamespace parameters: - description: The name of the contract interface in: path @@ -14535,12 +16726,10 @@ paths: schema: example: default type: string - - description: When set, the API will return the full FireFly Interface document - including all methods, events, and parameters + - description: When true the HTTP request blocks until the message is confirmed in: query - name: fetchchildren + name: confirm schema: - example: "true" type: string - description: Server-side request timeout (milliseconds, or set a custom suffix like 10s) @@ -14549,6 +16738,16 @@ paths: schema: default: 2m0s type: string + requestBody: + content: + application/json: + schema: + properties: + networkName: + description: An optional name to be used for publishing this definition + to the multiparty network, which may differ from the local name + type: string + type: object responses: "200": content: @@ -14762,6 +16961,240 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean + version: + description: A version for the FFI - use of semantic versioning + such as 'v1.0.1' is encouraged + type: string + type: object + description: Success + "202": + content: + application/json: + schema: + properties: + description: + description: A description of the smart contract this FFI represents + type: string + errors: + description: An array of smart contract error definitions + items: + description: An array of smart contract error definitions + properties: + description: + description: A description of the smart contract error + type: string + id: + description: The UUID of the FFI error definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this error is part of + format: uuid + type: string + name: + description: The name of the error + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of error parameter/argument definitions + items: + description: An array of error parameter/argument definitions + properties: + name: + description: The name of the parameter. 
Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this error within + the FFI for use on URL paths + type: string + signature: + description: The stringified signature of the error, as + computed by the blockchain plugin + type: string + type: object + type: array + events: + description: An array of smart contract event definitions + items: + description: An array of smart contract event definitions + properties: + description: + description: A description of the smart contract event + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this event from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this event from the original smart contract. Used by the + blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI event definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this event is part of + format: uuid + type: string + name: + description: The name of the event + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of event parameter/argument definitions + items: + description: An array of event parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this event within + the FFI for use on URL paths. Supports contracts that + have multiple event overrides with the same name + type: string + signature: + description: The stringified signature of the event, as + computed by the blockchain plugin + type: string + type: object + type: array + id: + description: The UUID of the FireFly interface (FFI) smart contract + definition + format: uuid + type: string + message: + description: The UUID of the broadcast message that was used to + publish this FFI to the network + format: uuid + type: string + methods: + description: An array of smart contract method definitions + items: + description: An array of smart contract method definitions + properties: + description: + description: A description of the smart contract method + type: string + details: + additionalProperties: + description: Additional blockchain specific fields about + this method from the original smart contract. Used by + the blockchain plugin and for documentation generation. + description: Additional blockchain specific fields about + this method from the original smart contract. 
Used by + the blockchain plugin and for documentation generation. + type: object + id: + description: The UUID of the FFI method definition + format: uuid + type: string + interface: + description: The UUID of the FFI smart contract definition + that this method is part of + format: uuid + type: string + name: + description: The name of the method + type: string + namespace: + description: The namespace of the FFI + type: string + params: + description: An array of method parameter/argument definitions + items: + description: An array of method parameter/argument definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + pathname: + description: The unique name allocated to this method within + the FFI for use on URL paths. Supports contracts that + have multiple method overrides with the same name + type: string + returns: + description: An array of method return definitions + items: + description: An array of method return definitions + properties: + name: + description: The name of the parameter. Note that + parameters must be ordered correctly on the FFI, + according to the order in the blockchain smart contract + type: string + schema: + description: FireFly uses an extended subset of JSON + Schema to describe parameters, similar to OpenAPI/Swagger. + Converters are available for native blockchain interface + definitions / type systems - such as an Ethereum + ABI. See the documentation for more detail + type: object + type: array + type: object + type: array + name: + description: The name of the FFI - usually matching the smart + contract name + type: string + namespace: + description: The namespace of the FFI + type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -15030,6 +17463,14 @@ paths: namespace: description: The namespace of the FFI type: string + networkName: + description: The published name of the FFI within the multiparty + network + type: string + published: + description: Indicates if the FFI is published to other members + of the multiparty network + type: boolean version: description: A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged @@ -16987,6 +19428,11 @@ paths: name: pins schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: rejectreason + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query name: sequence @@ -17278,6 +19724,11 @@ paths: name: pins schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: rejectreason + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' in: query name: sequence @@ -17502,6 +19953,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -17609,6 +20064,11 @@ paths: name: pins schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: rejectreason + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query name: sequence @@ -18960,6 +21420,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -19305,6 +21766,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -19828,6 +22290,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -19926,6 +22389,11 @@ paths: name: pins schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: rejectreason + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query name: sequence @@ -20152,6 +22620,10 @@ paths: is assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on + the rejection reason + type: string state: description: The current state of the message enum: @@ -20434,6 +22906,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -21142,6 +23618,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -21303,6 +23783,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -21636,6 +24120,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -21797,6 +24285,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -22211,6 +24703,10 @@ paths: assigned for each topic type: string type: array + rejectReason: + description: If a message was rejected, provides details on the + rejection reason + type: string state: description: The current state of the message enum: @@ -22570,6 +25066,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -22704,6 +25201,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -24239,6 +26737,9 @@ paths: description: The time the operation was created format: 
date-time type: string + detail: + description: Additional detailed information about an operation + provided by the connector error: description: Any error reported back from the plugin for this operation @@ -24803,6 +27304,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -25211,6 +27713,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered + array. The batch size is capped to the readAhead limit. + The event payload is always an array even if there is + a single event in the batch. Commonly used with Webhooks + to allow events to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel @@ -25230,6 +27743,37 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive + connection between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. @@ -25296,6 +27840,31 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying + the webhook call' + properties: + count: + description: Number of times to retry the webhook call + in case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to + false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry + the webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -25433,6 +28002,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered array. + The batch size is capped to the readAhead limit. The event + payload is always an array even if there is a single event + in the batch. Commonly used with Webhooks to allow events + to be delivered and acknowledged in batches. 
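The new `batch` and `batchTimeout` subscription options above enable ordered array delivery of events, with the batch size capped by the `readAhead` limit. A hedged sketch of creating such a webhook subscription follows; the `/api/v1/subscriptions` path, base URL, subscription name, and webhook target URL are assumptions for a local node's default namespace.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	sub := map[string]interface{}{
		"name":      "batched-webhook", // placeholder subscription name
		"transport": "webhooks",
		"options": map[string]interface{}{
			"url":          "https://example.com/firefly-events", // placeholder webhook target
			"batch":        true,                                 // deliver events as an ordered array
			"batchTimeout": "500ms",                              // flush a partial batch after this long
			"readAhead":    50,                                   // also caps the batch size, per the description
		},
	}
	body, _ := json.Marshal(sub)
	// Assumed local FireFly node, default namespace.
	resp, err := http.Post("http://127.0.0.1:5000/api/v1/subscriptions",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("create subscription:", resp.Status)
}
```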
+ type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -25451,6 +28031,35 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive connection + between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. Only @@ -25515,6 +28124,30 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying the + webhook call' + properties: + count: + description: Number of times to retry the webhook call in + case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -25639,6 +28272,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered + array. The batch size is capped to the readAhead limit. + The event payload is always an array even if there is a + single event in the batch. Commonly used with Webhooks to + allow events to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -25657,6 +28301,37 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. 
+ type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive + connection between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. @@ -25722,6 +28397,31 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying + the webhook call' + properties: + count: + description: Number of times to retry the webhook call + in case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to + false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -25857,6 +28557,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered array. + The batch size is capped to the readAhead limit. The event + payload is always an array even if there is a single event + in the batch. Commonly used with Webhooks to allow events + to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -25875,6 +28586,35 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive connection + between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. 
Only @@ -25939,6 +28679,30 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying the + webhook call' + properties: + count: + description: Number of times to retry the webhook call in + case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -26063,6 +28827,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered + array. The batch size is capped to the readAhead limit. + The event payload is always an array even if there is a + single event in the batch. Commonly used with Webhooks to + allow events to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -26081,6 +28856,37 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive + connection between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. 
@@ -26146,6 +28952,31 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying + the webhook call' + properties: + count: + description: Number of times to retry the webhook call + in case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to + false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -26343,6 +29174,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered + array. The batch size is capped to the readAhead limit. + The event payload is always an array even if there is a + single event in the batch. Commonly used with Webhooks to + allow events to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -26361,6 +29203,37 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive + connection between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. 
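The `httpOptions`, `retry`, and `tlsConfigName` options above tune the outbound webhook call itself. The sketch below combines them in one subscription; durations use the string format shown in the spec, and the endpoint path, base URL, TLS configuration name, and retry values are placeholders.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	sub := map[string]interface{}{
		"name":      "resilient-webhook",
		"transport": "webhooks",
		"options": map[string]interface{}{
			"url":           "https://example.com/firefly-events",
			"tlsConfigName": "default", // must name a TLS config already defined on the namespace (placeholder)
			"httpOptions": map[string]interface{}{
				"connectionTimeout": "30s",
				"requestTimeout":    "30s",
				"idleTimeout":       "1m",
				"maxIdleConns":      10,
			},
			"retry": map[string]interface{}{
				"enabled":      true,
				"count":        3,
				"initialDelay": "250ms",
				"maxDelay":     "10s",
			},
		},
	}
	body, _ := json.Marshal(sub)
	resp, err := http.Post("http://127.0.0.1:5000/api/v1/subscriptions", // assumed local node
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("create subscription:", resp.Status)
}
```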
@@ -26426,6 +29299,31 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying + the webhook call' + properties: + count: + description: Number of times to retry the webhook call + in case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to + false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -26903,17 +29801,157 @@ paths: that operates the node type: string message: - description: The UUID of a message that has been correlated with - this approval using the data field of the approval in a compatible - token connector - format: uuid - type: string + description: You can specify a message to correlate with the approval, + which can be of type broadcast or private. Your chosen token connector + and on-chain smart contract must support on-chain/off-chain correlation + by taking a `data` input on the approval + properties: + data: + description: For input allows you to specify data in-line in + the message, that will be turned into data attachments. For + output when fetchdata is used on API calls, includes the in-line + data payloads of all data attachments + items: + description: For input allows you to specify data in-line + in the message, that will be turned into data attachments. + For output when fetchdata is used on API calls, includes + the in-line data payloads of all data attachments + properties: + datatype: + description: The optional datatype to use for validation + of the in-line data + properties: + name: + description: The name of the datatype + type: string + version: + description: The version of the datatype. Semantic + versioning is encouraged, such as v1.0.1 + type: string + type: object + id: + description: The UUID of the referenced data resource + format: uuid + type: string + validator: + description: The data validator type to use for in-line + data + type: string + value: + description: The in-line value for the data. Can be any + JSON type - object, array, string, number or boolean + type: object + type: array + group: + description: Allows you to specify details of the private group + of recipients in-line in the message. Alternative to using + the header.group to specify the hash of a group that has been + previously resolved + properties: + members: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + items: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + properties: + identity: + description: The DID of the group member. On input + can be a UUID or org name, and will be resolved + to a DID + type: string + node: + description: The UUID of the node that will receive + a copy of the off-chain message for the identity. 
+ The first applicable node for the identity will + be picked automatically on input if not specified + type: string + type: object + type: array + name: + description: Optional name for the group. Allows you to + have multiple separate groups with the same list of participants + type: string + type: object + header: + description: The message header contains all fields that are + used to build the message hash + properties: + author: + description: The DID of identity of the submitter + type: string + cid: + description: The correlation ID of the message. Set this + when a message is a response to another message + format: uuid + type: string + group: + description: Private messages only - the identifier hash + of the privacy group. Derived from the name and member + list of the group + format: byte + type: string + key: + description: The on-chain signing key used to sign the transaction + type: string + tag: + description: The message tag indicates the purpose of the + message to the applications that process it + type: string + topics: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be assigned + - using the default topic is discouraged + items: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be + assigned - using the default topic is discouraged + type: string + type: array + txtype: + description: The type of transaction used to order/deliver + this message + enum: + - none + - unpinned + - batch_pin + - network_action + - token_pool + - token_transfer + - contract_deploy + - contract_invoke + - contract_invoke_pin + - token_approval + - data_publish + type: string + type: + description: The type of the message + enum: + - definition + - broadcast + - private + - groupinit + - transfer_broadcast + - transfer_private + - approval_broadcast + - approval_private + type: string + type: object + idempotencyKey: + description: An optional unique identifier for a message. Cannot + be duplicated within a namespace, thus allowing idempotent + submission of messages to the API. Local only - not transferred + when the message is sent to other members of the network + type: string + type: object operator: description: The blockchain identity that is granted the approval type: string pool: - description: The name or UUID of a token pool. Required if more - than one pool exists. + description: The UUID the token pool this approval applies to + format: uuid type: string type: object responses: @@ -27307,14 +30345,153 @@ paths: the node type: string message: - description: The UUID of a message that has been correlated with - this transfer using the data field of the transfer in a compatible - token connector - format: uuid - type: string + description: You can specify a message to correlate with the transfer, + which can be of type broadcast or private. Your chosen token connector + and on-chain smart contract must support on-chain/off-chain correlation + by taking a `data` input on the transfer + properties: + data: + description: For input allows you to specify data in-line in + the message, that will be turned into data attachments. For + output when fetchdata is used on API calls, includes the in-line + data payloads of all data attachments + items: + description: For input allows you to specify data in-line + in the message, that will be turned into data attachments. 
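The approval schema above now accepts a full in-line message (broadcast or private) instead of a pre-existing message UUID, so the off-chain payload is created as part of the approval. A sketch under the assumption of a local default-namespace `/api/v1/tokens/approvals` endpoint and a token connector that supports a `data` input; the operator address, pool UUID, tag, and payload are placeholders.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	approval := map[string]interface{}{
		"operator": "0x0000000000000000000000000000000000000001", // placeholder blockchain identity being approved
		"pool":     "0e7f9aa7-2f7e-4f54-9f4e-000000000000",       // placeholder token pool UUID
		"message": map[string]interface{}{ // in-line broadcast message correlated with the approval
			"header": map[string]interface{}{
				"tag":    "approval-notes",
				"topics": []string{"token-approvals"},
			},
			"data": []map[string]interface{}{
				{"value": map[string]interface{}{"reason": "supplier settlement"}},
			},
		},
		"idempotencyKey": "approval-0001", // optional; unique within the namespace
	}
	body, _ := json.Marshal(approval)
	resp, err := http.Post("http://127.0.0.1:5000/api/v1/tokens/approvals",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("approval:", resp.Status)
}
```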
+ For output when fetchdata is used on API calls, includes + the in-line data payloads of all data attachments + properties: + datatype: + description: The optional datatype to use for validation + of the in-line data + properties: + name: + description: The name of the datatype + type: string + version: + description: The version of the datatype. Semantic + versioning is encouraged, such as v1.0.1 + type: string + type: object + id: + description: The UUID of the referenced data resource + format: uuid + type: string + validator: + description: The data validator type to use for in-line + data + type: string + value: + description: The in-line value for the data. Can be any + JSON type - object, array, string, number or boolean + type: object + type: array + group: + description: Allows you to specify details of the private group + of recipients in-line in the message. Alternative to using + the header.group to specify the hash of a group that has been + previously resolved + properties: + members: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + items: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + properties: + identity: + description: The DID of the group member. On input + can be a UUID or org name, and will be resolved + to a DID + type: string + node: + description: The UUID of the node that will receive + a copy of the off-chain message for the identity. + The first applicable node for the identity will + be picked automatically on input if not specified + type: string + type: object + type: array + name: + description: Optional name for the group. Allows you to + have multiple separate groups with the same list of participants + type: string + type: object + header: + description: The message header contains all fields that are + used to build the message hash + properties: + author: + description: The DID of identity of the submitter + type: string + cid: + description: The correlation ID of the message. Set this + when a message is a response to another message + format: uuid + type: string + group: + description: Private messages only - the identifier hash + of the privacy group. Derived from the name and member + list of the group + format: byte + type: string + key: + description: The on-chain signing key used to sign the transaction + type: string + tag: + description: The message tag indicates the purpose of the + message to the applications that process it + type: string + topics: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be assigned + - using the default topic is discouraged + items: + description: A message topic associates this message with + an ordered stream of data. 
A custom topic should be + assigned - using the default topic is discouraged + type: string + type: array + txtype: + description: The type of transaction used to order/deliver + this message + enum: + - none + - unpinned + - batch_pin + - network_action + - token_pool + - token_transfer + - contract_deploy + - contract_invoke + - contract_invoke_pin + - token_approval + - data_publish + type: string + type: + description: The type of the message + enum: + - definition + - broadcast + - private + - groupinit + - transfer_broadcast + - transfer_private + - approval_broadcast + - approval_private + type: string + type: object + idempotencyKey: + description: An optional unique identifier for a message. Cannot + be duplicated within a namespace, thus allowing idempotent + submission of messages to the API. Local only - not transferred + when the message is sent to other members of the network + type: string + type: object pool: - description: The UUID the token pool this transfer applies to - format: uuid + description: The name or UUID of a token pool type: string to: description: The target account for the transfer. On input defaults @@ -27621,14 +30798,153 @@ paths: the node type: string message: - description: The UUID of a message that has been correlated with - this transfer using the data field of the transfer in a compatible - token connector - format: uuid - type: string + description: You can specify a message to correlate with the transfer, + which can be of type broadcast or private. Your chosen token connector + and on-chain smart contract must support on-chain/off-chain correlation + by taking a `data` input on the transfer + properties: + data: + description: For input allows you to specify data in-line in + the message, that will be turned into data attachments. For + output when fetchdata is used on API calls, includes the in-line + data payloads of all data attachments + items: + description: For input allows you to specify data in-line + in the message, that will be turned into data attachments. + For output when fetchdata is used on API calls, includes + the in-line data payloads of all data attachments + properties: + datatype: + description: The optional datatype to use for validation + of the in-line data + properties: + name: + description: The name of the datatype + type: string + version: + description: The version of the datatype. Semantic + versioning is encouraged, such as v1.0.1 + type: string + type: object + id: + description: The UUID of the referenced data resource + format: uuid + type: string + validator: + description: The data validator type to use for in-line + data + type: string + value: + description: The in-line value for the data. Can be any + JSON type - object, array, string, number or boolean + type: object + type: array + group: + description: Allows you to specify details of the private group + of recipients in-line in the message. Alternative to using + the header.group to specify the hash of a group that has been + previously resolved + properties: + members: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + items: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + properties: + identity: + description: The DID of the group member. 
On input + can be a UUID or org name, and will be resolved + to a DID + type: string + node: + description: The UUID of the node that will receive + a copy of the off-chain message for the identity. + The first applicable node for the identity will + be picked automatically on input if not specified + type: string + type: object + type: array + name: + description: Optional name for the group. Allows you to + have multiple separate groups with the same list of participants + type: string + type: object + header: + description: The message header contains all fields that are + used to build the message hash + properties: + author: + description: The DID of identity of the submitter + type: string + cid: + description: The correlation ID of the message. Set this + when a message is a response to another message + format: uuid + type: string + group: + description: Private messages only - the identifier hash + of the privacy group. Derived from the name and member + list of the group + format: byte + type: string + key: + description: The on-chain signing key used to sign the transaction + type: string + tag: + description: The message tag indicates the purpose of the + message to the applications that process it + type: string + topics: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be assigned + - using the default topic is discouraged + items: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be + assigned - using the default topic is discouraged + type: string + type: array + txtype: + description: The type of transaction used to order/deliver + this message + enum: + - none + - unpinned + - batch_pin + - network_action + - token_pool + - token_transfer + - contract_deploy + - contract_invoke + - contract_invoke_pin + - token_approval + - data_publish + type: string + type: + description: The type of the message + enum: + - definition + - broadcast + - private + - groupinit + - transfer_broadcast + - transfer_private + - approval_broadcast + - approval_private + type: string + type: object + idempotencyKey: + description: An optional unique identifier for a message. Cannot + be duplicated within a namespace, thus allowing idempotent + submission of messages to the API. Local only - not transferred + when the message is sent to other members of the network + type: string + type: object pool: - description: The UUID the token pool this transfer applies to - format: uuid + description: The name or UUID of a token pool type: string to: description: The target account for the transfer. On input defaults @@ -27858,6 +31174,11 @@ paths: schema: default: 2m0s type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: active + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query name: connector @@ -27905,12 +31226,17 @@ paths: type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query - name: standard + name: networkname schema: type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query - name: state + name: published + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: standard schema: type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' @@ -27974,6 +31300,10 @@ paths: schema: items: properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean connector: description: The name of the token connector, as specified in the FireFly core configuration file that is responsible for @@ -28033,7 +31363,7 @@ paths: type: string message: description: The UUID of the broadcast message used to inform - the network to index this pool + the network about this pool format: uuid type: string methods: @@ -28046,16 +31376,18 @@ paths: namespace: description: The namespace for the token pool type: string + networkName: + description: The published name of the token pool within the + multiparty network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean standard: description: The ERC standard the token pool conforms to, as reported by the token connector type: string - state: - description: The current state of the token pool - enum: - - pending - - confirmed - type: string symbol: description: The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information @@ -28101,6 +31433,12 @@ paths: name: confirm schema: type: string + - description: When true the definition will be published to all other members + of the multiparty network + in: query + name: publish + schema: + type: string - description: Server-side request timeout (milliseconds, or set a custom suffix like 10s) in: header @@ -28158,6 +31496,10 @@ paths: description: The name of the token pool. Note the name is not validated against the description of the token on the blockchain type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string symbol: description: The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information @@ -28175,6 +31517,10 @@ paths: application/json: schema: properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean connector: description: The name of the token connector, as specified in the FireFly core configuration file that is responsible for @@ -28233,7 +31579,7 @@ paths: type: string message: description: The UUID of the broadcast message used to inform - the network to index this pool + the network about this pool format: uuid type: string methods: @@ -28246,16 +31592,18 @@ paths: namespace: description: The namespace for the token pool type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean standard: description: The ERC standard the token pool conforms to, as reported by the token connector type: string - state: - description: The current state of the token pool - enum: - - pending - - confirmed - type: string symbol: description: The token symbol. 
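With the new `publish` query parameter and `networkName` field above, a token pool can be created locally first and broadcast to the network later. A sketch of the unpublished-create step; the `/api/v1/tokens/pools` path, base URL, and pool names are assumptions for a local default-namespace node.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	pool := map[string]interface{}{
		"name":        "loyalty-points",      // local pool name (placeholder)
		"networkName": "acme-loyalty-points", // name to use if the pool is published later (placeholder)
		"type":        "fungible",
	}
	body, _ := json.Marshal(pool)
	// publish=false keeps the definition local; confirm=true blocks until the request is confirmed.
	url := "http://127.0.0.1:5000/api/v1/tokens/pools?publish=false&confirm=true" // assumed local node
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("create pool:", resp.Status)
}
```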
If supplied on input for an existing on-chain token, this must match the on-chain information @@ -28285,6 +31633,10 @@ paths: application/json: schema: properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean connector: description: The name of the token connector, as specified in the FireFly core configuration file that is responsible for @@ -28343,7 +31695,7 @@ paths: type: string message: description: The UUID of the broadcast message used to inform - the network to index this pool + the network about this pool format: uuid type: string methods: @@ -28356,16 +31708,18 @@ paths: namespace: description: The namespace for the token pool type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean standard: description: The ERC standard the token pool conforms to, as reported by the token connector type: string - state: - description: The current state of the token pool - enum: - - pending - - confirmed - type: string symbol: description: The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information @@ -28395,6 +31749,39 @@ paths: tags: - Non-Default Namespace /namespaces/{ns}/tokens/pools/{nameOrId}: + delete: + description: Delete a token pool + operationId: deleteTokenPoolNamespace + parameters: + - description: The token pool name or ID + in: path + name: nameOrId + required: true + schema: + type: string + - description: The namespace which scopes this request + in: path + name: ns + required: true + schema: + example: default + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + responses: + "204": + content: + application/json: {} + description: Success + default: + description: "" + tags: + - Non-Default Namespace get: description: Gets a token pool by its name or its ID operationId: getTokenPoolByNameOrIDNamespace @@ -28425,6 +31812,287 @@ paths: application/json: schema: properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean + connector: + description: The name of the token connector, as specified in + the FireFly core configuration file that is responsible for + the token pool. Required on input when multiple token connectors + are configured + type: string + created: + description: The creation time of the pool + format: date-time + type: string + decimals: + description: Number of decimal places that this token has + type: integer + id: + description: The UUID of the token pool + format: uuid + type: string + info: + additionalProperties: + description: Token connector specific information about the + pool. See your chosen token connector documentation for details + description: Token connector specific information about the pool. 
+ See your chosen token connector documentation for details + type: object + interface: + description: A reference to an existing FFI, containing pre-registered + type information for the token contract + properties: + id: + description: The UUID of the FireFly interface + format: uuid + type: string + name: + description: The name of the FireFly interface + type: string + version: + description: The version of the FireFly interface + type: string + type: object + interfaceFormat: + description: The interface encoding format supported by the connector + for this token pool + enum: + - abi + - ffi + type: string + key: + description: The signing key used to create the token pool. On + input for token connectors that support on-chain deployment + of new tokens (vs. only index existing ones) this determines + the signing key used to create the token on-chain + type: string + locator: + description: A unique identifier for the pool, as provided by + the token connector + type: string + message: + description: The UUID of the broadcast message used to inform + the network about this pool + format: uuid + type: string + methods: + description: The method definitions resolved by the token connector + to be used by each token operation + name: + description: The name of the token pool. Note the name is not + validated against the description of the token on the blockchain + type: string + namespace: + description: The namespace for the token pool + type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean + standard: + description: The ERC standard the token pool conforms to, as reported + by the token connector + type: string + symbol: + description: The token symbol. 
If supplied on input for an existing + on-chain token, this must match the on-chain information + type: string + tx: + description: Reference to the FireFly transaction used to create + and broadcast this pool to the network + properties: + id: + description: The UUID of the FireFly transaction + format: uuid + type: string + type: + description: The type of the FireFly transaction + type: string + type: object + type: + description: The type of token the pool contains, such as fungible/non-fungible + enum: + - fungible + - nonfungible + type: string + type: object + description: Success + default: + description: "" + tags: + - Non-Default Namespace + /namespaces/{ns}/tokens/pools/{nameOrId}/publish: + post: + description: Publish a token pool to all other members of the multiparty network + operationId: postTokenPoolPublishNamespace + parameters: + - description: The token pool name or ID + in: path + name: nameOrId + required: true + schema: + type: string + - description: The namespace which scopes this request + in: path + name: ns + required: true + schema: + example: default + type: string + - description: When true the HTTP request blocks until the message is confirmed + in: query + name: confirm + schema: + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + requestBody: + content: + application/json: + schema: + properties: + networkName: + description: An optional name to be used for publishing this definition + to the multiparty network, which may differ from the local name + type: string + type: object + responses: + "200": + content: + application/json: + schema: + properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean + connector: + description: The name of the token connector, as specified in + the FireFly core configuration file that is responsible for + the token pool. Required on input when multiple token connectors + are configured + type: string + created: + description: The creation time of the pool + format: date-time + type: string + decimals: + description: Number of decimal places that this token has + type: integer + id: + description: The UUID of the token pool + format: uuid + type: string + info: + additionalProperties: + description: Token connector specific information about the + pool. See your chosen token connector documentation for details + description: Token connector specific information about the pool. + See your chosen token connector documentation for details + type: object + interface: + description: A reference to an existing FFI, containing pre-registered + type information for the token contract + properties: + id: + description: The UUID of the FireFly interface + format: uuid + type: string + name: + description: The name of the FireFly interface + type: string + version: + description: The version of the FireFly interface + type: string + type: object + interfaceFormat: + description: The interface encoding format supported by the connector + for this token pool + enum: + - abi + - ffi + type: string + key: + description: The signing key used to create the token pool. On + input for token connectors that support on-chain deployment + of new tokens (vs. 
only index existing ones) this determines + the signing key used to create the token on-chain + type: string + locator: + description: A unique identifier for the pool, as provided by + the token connector + type: string + message: + description: The UUID of the broadcast message used to inform + the network about this pool + format: uuid + type: string + methods: + description: The method definitions resolved by the token connector + to be used by each token operation + name: + description: The name of the token pool. Note the name is not + validated against the description of the token on the blockchain + type: string + namespace: + description: The namespace for the token pool + type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean + standard: + description: The ERC standard the token pool conforms to, as reported + by the token connector + type: string + symbol: + description: The token symbol. If supplied on input for an existing + on-chain token, this must match the on-chain information + type: string + tx: + description: Reference to the FireFly transaction used to create + and broadcast this pool to the network + properties: + id: + description: The UUID of the FireFly transaction + format: uuid + type: string + type: + description: The type of the FireFly transaction + type: string + type: object + type: + description: The type of token the pool contains, such as fungible/non-fungible + enum: + - fungible + - nonfungible + type: string + type: object + description: Success + "202": + content: + application/json: + schema: + properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean connector: description: The name of the token connector, as specified in the FireFly core configuration file that is responsible for @@ -28483,7 +32151,7 @@ paths: type: string message: description: The UUID of the broadcast message used to inform - the network to index this pool + the network about this pool format: uuid type: string methods: @@ -28496,16 +32164,18 @@ paths: namespace: description: The namespace for the token pool type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean standard: description: The ERC standard the token pool conforms to, as reported by the token connector type: string - state: - description: The current state of the token pool - enum: - - pending - - confirmed - type: string symbol: description: The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information @@ -28838,14 +32508,153 @@ paths: the node type: string message: - description: The UUID of a message that has been correlated with - this transfer using the data field of the transfer in a compatible - token connector - format: uuid - type: string + description: You can specify a message to correlate with the transfer, + which can be of type broadcast or private. 
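The `/namespaces/{ns}/tokens/pools/{nameOrId}/publish` operation defined above then broadcasts a previously unpublished pool to the other members of the multiparty network, optionally under a different network name. A sketch of that second step; the namespace, pool name, and base URL are placeholders.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body, _ := json.Marshal(map[string]interface{}{
		// Optional: publish under a different name than the local one.
		"networkName": "acme-loyalty-points",
	})
	url := "http://127.0.0.1:5000/api/v1/namespaces/default/tokens/pools/loyalty-points/publish?confirm=true"
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("publish pool:", resp.Status) // 200 when confirmed, 202 when accepted asynchronously
}
```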
Your chosen token connector + and on-chain smart contract must support on-chain/off-chain correlation + by taking a `data` input on the transfer + properties: + data: + description: For input allows you to specify data in-line in + the message, that will be turned into data attachments. For + output when fetchdata is used on API calls, includes the in-line + data payloads of all data attachments + items: + description: For input allows you to specify data in-line + in the message, that will be turned into data attachments. + For output when fetchdata is used on API calls, includes + the in-line data payloads of all data attachments + properties: + datatype: + description: The optional datatype to use for validation + of the in-line data + properties: + name: + description: The name of the datatype + type: string + version: + description: The version of the datatype. Semantic + versioning is encouraged, such as v1.0.1 + type: string + type: object + id: + description: The UUID of the referenced data resource + format: uuid + type: string + validator: + description: The data validator type to use for in-line + data + type: string + value: + description: The in-line value for the data. Can be any + JSON type - object, array, string, number or boolean + type: object + type: array + group: + description: Allows you to specify details of the private group + of recipients in-line in the message. Alternative to using + the header.group to specify the hash of a group that has been + previously resolved + properties: + members: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + items: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + properties: + identity: + description: The DID of the group member. On input + can be a UUID or org name, and will be resolved + to a DID + type: string + node: + description: The UUID of the node that will receive + a copy of the off-chain message for the identity. + The first applicable node for the identity will + be picked automatically on input if not specified + type: string + type: object + type: array + name: + description: Optional name for the group. Allows you to + have multiple separate groups with the same list of participants + type: string + type: object + header: + description: The message header contains all fields that are + used to build the message hash + properties: + author: + description: The DID of identity of the submitter + type: string + cid: + description: The correlation ID of the message. Set this + when a message is a response to another message + format: uuid + type: string + group: + description: Private messages only - the identifier hash + of the privacy group. Derived from the name and member + list of the group + format: byte + type: string + key: + description: The on-chain signing key used to sign the transaction + type: string + tag: + description: The message tag indicates the purpose of the + message to the applications that process it + type: string + topics: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be assigned + - using the default topic is discouraged + items: + description: A message topic associates this message with + an ordered stream of data. 
A custom topic should be + assigned - using the default topic is discouraged + type: string + type: array + txtype: + description: The type of transaction used to order/deliver + this message + enum: + - none + - unpinned + - batch_pin + - network_action + - token_pool + - token_transfer + - contract_deploy + - contract_invoke + - contract_invoke_pin + - token_approval + - data_publish + type: string + type: + description: The type of the message + enum: + - definition + - broadcast + - private + - groupinit + - transfer_broadcast + - transfer_private + - approval_broadcast + - approval_private + type: string + type: object + idempotencyKey: + description: An optional unique identifier for a message. Cannot + be duplicated within a namespace, thus allowing idempotent + submission of messages to the API. Local only - not transferred + when the message is sent to other members of the network + type: string + type: object pool: - description: The UUID the token pool this transfer applies to - format: uuid + description: The name or UUID of a token pool type: string to: description: The target account for the transfer. On input defaults @@ -29858,6 +33667,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -29926,6 +33736,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -29967,6 +33778,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -29985,6 +33797,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -30315,6 +34128,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -30442,6 +34256,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -31907,6 +35722,9 @@ paths: description: The time the operation was created format: date-time type: string + detail: + description: Additional detailed information about an operation + provided by the connector error: description: Any error reported back from the plugin for this operation @@ -32443,6 +36261,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -32837,6 +36656,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered + array. The batch size is capped to the readAhead limit. + The event payload is always an array even if there is + a single event in the batch. Commonly used with Webhooks + to allow events to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel @@ -32856,6 +36686,37 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. 
+ type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive + connection between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. @@ -32922,6 +36783,31 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying + the webhook call' + properties: + count: + description: Number of times to retry the webhook call + in case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to + false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry + the webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -33052,6 +36938,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered array. + The batch size is capped to the readAhead limit. The event + payload is always an array even if there is a single event + in the batch. Commonly used with Webhooks to allow events + to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -33070,6 +36967,35 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive connection + between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. 
Only @@ -33134,6 +37060,30 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying the + webhook call' + properties: + count: + description: Number of times to retry the webhook call in + case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -33258,6 +37208,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered + array. The batch size is capped to the readAhead limit. + The event payload is always an array even if there is a + single event in the batch. Commonly used with Webhooks to + allow events to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -33276,6 +37237,37 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive + connection between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. 
@@ -33341,6 +37333,31 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying + the webhook call' + properties: + count: + description: Number of times to retry the webhook call + in case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to + false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -33469,6 +37486,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered array. + The batch size is capped to the readAhead limit. The event + payload is always an array even if there is a single event + in the batch. Commonly used with Webhooks to allow events + to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -33487,6 +37515,35 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive connection + between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. 
Only @@ -33551,6 +37608,30 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying the + webhook call' + properties: + count: + description: Number of times to retry the webhook call in + case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -33675,6 +37756,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered + array. The batch size is capped to the readAhead limit. + The event payload is always an array even if there is a + single event in the batch. Commonly used with Webhooks to + allow events to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -33693,6 +37785,37 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive + connection between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. 
@@ -33758,6 +37881,31 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying + the webhook call' + properties: + count: + description: Number of times to retry the webhook call + in case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to + false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -33941,6 +38089,17 @@ paths: options: description: Subscription options properties: + batch: + description: Events are delivered in batches in an ordered + array. The batch size is capped to the readAhead limit. + The event payload is always an array even if there is a + single event in the batch. Commonly used with Webhooks to + allow events to be delivered and acknowledged in batches. + type: boolean + batchTimeout: + description: When batching is enabled, the optional timeout + to send events even when the batch hasn't filled. + type: string fastack: description: 'Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations' @@ -33959,6 +38118,37 @@ paths: description: 'Webhooks only: Static headers to set on the webhook request' type: object + httpOptions: + description: 'Webhooks only: a set of options for HTTP' + properties: + connectionTimeout: + description: The maximum amount of time that a connection + is allowed to remain with no data transmitted. + type: string + expectContinueTimeout: + description: See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport) + type: string + idleTimeout: + description: The max duration to hold a HTTP keepalive + connection between calls + type: string + maxIdleConns: + description: The max number of idle connections to hold + pooled + type: integer + proxyURL: + description: HTTP proxy URL to use for outbound requests + to the webhook + type: string + requestTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + tlsHandshakeTimeout: + description: The max duration to hold a TLS handshake + alive + type: string + type: object input: description: 'Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. 
@@ -34024,6 +38214,31 @@ paths: description: 'Webhooks only: The transaction type to set on the reply message' type: string + retry: + description: 'Webhooks only: a set of options for retrying + the webhook call' + properties: + count: + description: Number of times to retry the webhook call + in case of failure + type: integer + enabled: + description: Enables retry on HTTP calls, defaults to + false + type: boolean + initialDelay: + description: Initial delay between retries when we retry + the webhook call + type: string + maxDelay: + description: Max delay between retries when we retry the + webhookcall + type: string + type: object + tlsConfigName: + description: The name of an existing TLS configuration associated + to the namespace to use + type: string url: description: 'Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config' @@ -34473,17 +38688,157 @@ paths: that operates the node type: string message: - description: The UUID of a message that has been correlated with - this approval using the data field of the approval in a compatible - token connector - format: uuid - type: string + description: You can specify a message to correlate with the approval, + which can be of type broadcast or private. Your chosen token connector + and on-chain smart contract must support on-chain/off-chain correlation + by taking a `data` input on the approval + properties: + data: + description: For input allows you to specify data in-line in + the message, that will be turned into data attachments. For + output when fetchdata is used on API calls, includes the in-line + data payloads of all data attachments + items: + description: For input allows you to specify data in-line + in the message, that will be turned into data attachments. + For output when fetchdata is used on API calls, includes + the in-line data payloads of all data attachments + properties: + datatype: + description: The optional datatype to use for validation + of the in-line data + properties: + name: + description: The name of the datatype + type: string + version: + description: The version of the datatype. Semantic + versioning is encouraged, such as v1.0.1 + type: string + type: object + id: + description: The UUID of the referenced data resource + format: uuid + type: string + validator: + description: The data validator type to use for in-line + data + type: string + value: + description: The in-line value for the data. Can be any + JSON type - object, array, string, number or boolean + type: object + type: array + group: + description: Allows you to specify details of the private group + of recipients in-line in the message. Alternative to using + the header.group to specify the hash of a group that has been + previously resolved + properties: + members: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + items: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + properties: + identity: + description: The DID of the group member. On input + can be a UUID or org name, and will be resolved + to a DID + type: string + node: + description: The UUID of the node that will receive + a copy of the off-chain message for the identity. 
+ The first applicable node for the identity will + be picked automatically on input if not specified + type: string + type: object + type: array + name: + description: Optional name for the group. Allows you to + have multiple separate groups with the same list of participants + type: string + type: object + header: + description: The message header contains all fields that are + used to build the message hash + properties: + author: + description: The DID of identity of the submitter + type: string + cid: + description: The correlation ID of the message. Set this + when a message is a response to another message + format: uuid + type: string + group: + description: Private messages only - the identifier hash + of the privacy group. Derived from the name and member + list of the group + format: byte + type: string + key: + description: The on-chain signing key used to sign the transaction + type: string + tag: + description: The message tag indicates the purpose of the + message to the applications that process it + type: string + topics: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be assigned + - using the default topic is discouraged + items: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be + assigned - using the default topic is discouraged + type: string + type: array + txtype: + description: The type of transaction used to order/deliver + this message + enum: + - none + - unpinned + - batch_pin + - network_action + - token_pool + - token_transfer + - contract_deploy + - contract_invoke + - contract_invoke_pin + - token_approval + - data_publish + type: string + type: + description: The type of the message + enum: + - definition + - broadcast + - private + - groupinit + - transfer_broadcast + - transfer_private + - approval_broadcast + - approval_private + type: string + type: object + idempotencyKey: + description: An optional unique identifier for a message. Cannot + be duplicated within a namespace, thus allowing idempotent + submission of messages to the API. Local only - not transferred + when the message is sent to other members of the network + type: string + type: object operator: description: The blockchain identity that is granted the approval type: string pool: - description: The name or UUID of a token pool. Required if more - than one pool exists. + description: The UUID the token pool this approval applies to + format: uuid type: string type: object responses: @@ -34863,14 +39218,153 @@ paths: the node type: string message: - description: The UUID of a message that has been correlated with - this transfer using the data field of the transfer in a compatible - token connector - format: uuid - type: string + description: You can specify a message to correlate with the transfer, + which can be of type broadcast or private. Your chosen token connector + and on-chain smart contract must support on-chain/off-chain correlation + by taking a `data` input on the transfer + properties: + data: + description: For input allows you to specify data in-line in + the message, that will be turned into data attachments. For + output when fetchdata is used on API calls, includes the in-line + data payloads of all data attachments + items: + description: For input allows you to specify data in-line + in the message, that will be turned into data attachments. 
+ For output when fetchdata is used on API calls, includes + the in-line data payloads of all data attachments + properties: + datatype: + description: The optional datatype to use for validation + of the in-line data + properties: + name: + description: The name of the datatype + type: string + version: + description: The version of the datatype. Semantic + versioning is encouraged, such as v1.0.1 + type: string + type: object + id: + description: The UUID of the referenced data resource + format: uuid + type: string + validator: + description: The data validator type to use for in-line + data + type: string + value: + description: The in-line value for the data. Can be any + JSON type - object, array, string, number or boolean + type: object + type: array + group: + description: Allows you to specify details of the private group + of recipients in-line in the message. Alternative to using + the header.group to specify the hash of a group that has been + previously resolved + properties: + members: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + items: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + properties: + identity: + description: The DID of the group member. On input + can be a UUID or org name, and will be resolved + to a DID + type: string + node: + description: The UUID of the node that will receive + a copy of the off-chain message for the identity. + The first applicable node for the identity will + be picked automatically on input if not specified + type: string + type: object + type: array + name: + description: Optional name for the group. Allows you to + have multiple separate groups with the same list of participants + type: string + type: object + header: + description: The message header contains all fields that are + used to build the message hash + properties: + author: + description: The DID of identity of the submitter + type: string + cid: + description: The correlation ID of the message. Set this + when a message is a response to another message + format: uuid + type: string + group: + description: Private messages only - the identifier hash + of the privacy group. Derived from the name and member + list of the group + format: byte + type: string + key: + description: The on-chain signing key used to sign the transaction + type: string + tag: + description: The message tag indicates the purpose of the + message to the applications that process it + type: string + topics: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be assigned + - using the default topic is discouraged + items: + description: A message topic associates this message with + an ordered stream of data. 
A custom topic should be + assigned - using the default topic is discouraged + type: string + type: array + txtype: + description: The type of transaction used to order/deliver + this message + enum: + - none + - unpinned + - batch_pin + - network_action + - token_pool + - token_transfer + - contract_deploy + - contract_invoke + - contract_invoke_pin + - token_approval + - data_publish + type: string + type: + description: The type of the message + enum: + - definition + - broadcast + - private + - groupinit + - transfer_broadcast + - transfer_private + - approval_broadcast + - approval_private + type: string + type: object + idempotencyKey: + description: An optional unique identifier for a message. Cannot + be duplicated within a namespace, thus allowing idempotent + submission of messages to the API. Local only - not transferred + when the message is sent to other members of the network + type: string + type: object pool: - description: The UUID the token pool this transfer applies to - format: uuid + description: The name or UUID of a token pool type: string tokenIndex: description: The index of the token within the pool that this transfer @@ -35101,68 +39595,207 @@ paths: in the FireFly core configuration file type: string type: object - type: array - description: Success - default: - description: "" - tags: - - Default Namespace - /tokens/mint: - post: - description: Mints some tokens - operationId: postTokenMint - parameters: - - description: When true the HTTP request blocks until the message is confirmed - in: query - name: confirm - schema: - type: string - - description: Server-side request timeout (milliseconds, or set a custom suffix - like 10s) - in: header - name: Request-Timeout - schema: - default: 2m0s - type: string - requestBody: - content: - application/json: - schema: - properties: - amount: - description: The amount for the transfer. For non-fungible tokens - will always be 1. For fungible tokens, the number of decimals - for the token pool should be considered when inputting the amount. - For example, with 18 decimals a fractional balance of 10.234 will - be specified as 10,234,000,000,000,000,000 - type: string - config: - additionalProperties: - description: Input only field, with token connector specific configuration - of the transfer. See your chosen token connector documentation - for details - description: Input only field, with token connector specific configuration - of the transfer. See your chosen token connector documentation - for details - type: object - idempotencyKey: - description: An optional identifier to allow idempotent submission - of requests. Stored on the transaction uniquely within a namespace - type: string - key: - description: The blockchain signing key for the transfer. 
On input - defaults to the first signing key of the organization that operates - the node - type: string - message: - description: The UUID of a message that has been correlated with - this transfer using the data field of the transfer in a compatible - token connector - format: uuid - type: string + type: array + description: Success + default: + description: "" + tags: + - Default Namespace + /tokens/mint: + post: + description: Mints some tokens + operationId: postTokenMint + parameters: + - description: When true the HTTP request blocks until the message is confirmed + in: query + name: confirm + schema: + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + requestBody: + content: + application/json: + schema: + properties: + amount: + description: The amount for the transfer. For non-fungible tokens + will always be 1. For fungible tokens, the number of decimals + for the token pool should be considered when inputting the amount. + For example, with 18 decimals a fractional balance of 10.234 will + be specified as 10,234,000,000,000,000,000 + type: string + config: + additionalProperties: + description: Input only field, with token connector specific configuration + of the transfer. See your chosen token connector documentation + for details + description: Input only field, with token connector specific configuration + of the transfer. See your chosen token connector documentation + for details + type: object + idempotencyKey: + description: An optional identifier to allow idempotent submission + of requests. Stored on the transaction uniquely within a namespace + type: string + key: + description: The blockchain signing key for the transfer. On input + defaults to the first signing key of the organization that operates + the node + type: string + message: + description: You can specify a message to correlate with the transfer, + which can be of type broadcast or private. Your chosen token connector + and on-chain smart contract must support on-chain/off-chain correlation + by taking a `data` input on the transfer + properties: + data: + description: For input allows you to specify data in-line in + the message, that will be turned into data attachments. For + output when fetchdata is used on API calls, includes the in-line + data payloads of all data attachments + items: + description: For input allows you to specify data in-line + in the message, that will be turned into data attachments. + For output when fetchdata is used on API calls, includes + the in-line data payloads of all data attachments + properties: + datatype: + description: The optional datatype to use for validation + of the in-line data + properties: + name: + description: The name of the datatype + type: string + version: + description: The version of the datatype. Semantic + versioning is encouraged, such as v1.0.1 + type: string + type: object + id: + description: The UUID of the referenced data resource + format: uuid + type: string + validator: + description: The data validator type to use for in-line + data + type: string + value: + description: The in-line value for the data. Can be any + JSON type - object, array, string, number or boolean + type: object + type: array + group: + description: Allows you to specify details of the private group + of recipients in-line in the message. 
Alternative to using + the header.group to specify the hash of a group that has been + previously resolved + properties: + members: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + items: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + properties: + identity: + description: The DID of the group member. On input + can be a UUID or org name, and will be resolved + to a DID + type: string + node: + description: The UUID of the node that will receive + a copy of the off-chain message for the identity. + The first applicable node for the identity will + be picked automatically on input if not specified + type: string + type: object + type: array + name: + description: Optional name for the group. Allows you to + have multiple separate groups with the same list of participants + type: string + type: object + header: + description: The message header contains all fields that are + used to build the message hash + properties: + author: + description: The DID of identity of the submitter + type: string + cid: + description: The correlation ID of the message. Set this + when a message is a response to another message + format: uuid + type: string + group: + description: Private messages only - the identifier hash + of the privacy group. Derived from the name and member + list of the group + format: byte + type: string + key: + description: The on-chain signing key used to sign the transaction + type: string + tag: + description: The message tag indicates the purpose of the + message to the applications that process it + type: string + topics: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be assigned + - using the default topic is discouraged + items: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be + assigned - using the default topic is discouraged + type: string + type: array + txtype: + description: The type of transaction used to order/deliver + this message + enum: + - none + - unpinned + - batch_pin + - network_action + - token_pool + - token_transfer + - contract_deploy + - contract_invoke + - contract_invoke_pin + - token_approval + - data_publish + type: string + type: + description: The type of the message + enum: + - definition + - broadcast + - private + - groupinit + - transfer_broadcast + - transfer_private + - approval_broadcast + - approval_private + type: string + type: object + idempotencyKey: + description: An optional unique identifier for a message. Cannot + be duplicated within a namespace, thus allowing idempotent + submission of messages to the API. Local only - not transferred + when the message is sent to other members of the network + type: string + type: object pool: - description: The UUID the token pool this transfer applies to - format: uuid + description: The name or UUID of a token pool type: string to: description: The target account for the transfer. On input defaults @@ -35385,6 +40018,11 @@ paths: schema: default: 2m0s type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: active + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' in: query name: connector @@ -35432,12 +40070,17 @@ paths: type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query - name: standard + name: networkname schema: type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query - name: state + name: published + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: standard schema: type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' @@ -35501,6 +40144,10 @@ paths: schema: items: properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean connector: description: The name of the token connector, as specified in the FireFly core configuration file that is responsible for @@ -35560,7 +40207,7 @@ paths: type: string message: description: The UUID of the broadcast message used to inform - the network to index this pool + the network about this pool format: uuid type: string methods: @@ -35573,16 +40220,18 @@ paths: namespace: description: The namespace for the token pool type: string + networkName: + description: The published name of the token pool within the + multiparty network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean standard: description: The ERC standard the token pool conforms to, as reported by the token connector type: string - state: - description: The current state of the token pool - enum: - - pending - - confirmed - type: string symbol: description: The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information @@ -35621,6 +40270,12 @@ paths: name: confirm schema: type: string + - description: When true the definition will be published to all other members + of the multiparty network + in: query + name: publish + schema: + type: string - description: Server-side request timeout (milliseconds, or set a custom suffix like 10s) in: header @@ -35678,6 +40333,10 @@ paths: description: The name of the token pool. Note the name is not validated against the description of the token on the blockchain type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string symbol: description: The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information @@ -35695,6 +40354,445 @@ paths: application/json: schema: properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean + connector: + description: The name of the token connector, as specified in + the FireFly core configuration file that is responsible for + the token pool. Required on input when multiple token connectors + are configured + type: string + created: + description: The creation time of the pool + format: date-time + type: string + decimals: + description: Number of decimal places that this token has + type: integer + id: + description: The UUID of the token pool + format: uuid + type: string + info: + additionalProperties: + description: Token connector specific information about the + pool. See your chosen token connector documentation for details + description: Token connector specific information about the pool. 
+ See your chosen token connector documentation for details + type: object + interface: + description: A reference to an existing FFI, containing pre-registered + type information for the token contract + properties: + id: + description: The UUID of the FireFly interface + format: uuid + type: string + name: + description: The name of the FireFly interface + type: string + version: + description: The version of the FireFly interface + type: string + type: object + interfaceFormat: + description: The interface encoding format supported by the connector + for this token pool + enum: + - abi + - ffi + type: string + key: + description: The signing key used to create the token pool. On + input for token connectors that support on-chain deployment + of new tokens (vs. only index existing ones) this determines + the signing key used to create the token on-chain + type: string + locator: + description: A unique identifier for the pool, as provided by + the token connector + type: string + message: + description: The UUID of the broadcast message used to inform + the network about this pool + format: uuid + type: string + methods: + description: The method definitions resolved by the token connector + to be used by each token operation + name: + description: The name of the token pool. Note the name is not + validated against the description of the token on the blockchain + type: string + namespace: + description: The namespace for the token pool + type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean + standard: + description: The ERC standard the token pool conforms to, as reported + by the token connector + type: string + symbol: + description: The token symbol. If supplied on input for an existing + on-chain token, this must match the on-chain information + type: string + tx: + description: Reference to the FireFly transaction used to create + and broadcast this pool to the network + properties: + id: + description: The UUID of the FireFly transaction + format: uuid + type: string + type: + description: The type of the FireFly transaction + type: string + type: object + type: + description: The type of token the pool contains, such as fungible/non-fungible + enum: + - fungible + - nonfungible + type: string + type: object + description: Success + "202": + content: + application/json: + schema: + properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean + connector: + description: The name of the token connector, as specified in + the FireFly core configuration file that is responsible for + the token pool. Required on input when multiple token connectors + are configured + type: string + created: + description: The creation time of the pool + format: date-time + type: string + decimals: + description: Number of decimal places that this token has + type: integer + id: + description: The UUID of the token pool + format: uuid + type: string + info: + additionalProperties: + description: Token connector specific information about the + pool. See your chosen token connector documentation for details + description: Token connector specific information about the pool. 
+ See your chosen token connector documentation for details + type: object + interface: + description: A reference to an existing FFI, containing pre-registered + type information for the token contract + properties: + id: + description: The UUID of the FireFly interface + format: uuid + type: string + name: + description: The name of the FireFly interface + type: string + version: + description: The version of the FireFly interface + type: string + type: object + interfaceFormat: + description: The interface encoding format supported by the connector + for this token pool + enum: + - abi + - ffi + type: string + key: + description: The signing key used to create the token pool. On + input for token connectors that support on-chain deployment + of new tokens (vs. only index existing ones) this determines + the signing key used to create the token on-chain + type: string + locator: + description: A unique identifier for the pool, as provided by + the token connector + type: string + message: + description: The UUID of the broadcast message used to inform + the network about this pool + format: uuid + type: string + methods: + description: The method definitions resolved by the token connector + to be used by each token operation + name: + description: The name of the token pool. Note the name is not + validated against the description of the token on the blockchain + type: string + namespace: + description: The namespace for the token pool + type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean + standard: + description: The ERC standard the token pool conforms to, as reported + by the token connector + type: string + symbol: + description: The token symbol. 
If supplied on input for an existing + on-chain token, this must match the on-chain information + type: string + tx: + description: Reference to the FireFly transaction used to create + and broadcast this pool to the network + properties: + id: + description: The UUID of the FireFly transaction + format: uuid + type: string + type: + description: The type of the FireFly transaction + type: string + type: object + type: + description: The type of token the pool contains, such as fungible/non-fungible + enum: + - fungible + - nonfungible + type: string + type: object + description: Success + default: + description: "" + tags: + - Default Namespace + /tokens/pools/{nameOrId}: + delete: + description: Delete a token pool + operationId: deleteTokenPool + parameters: + - description: The token pool name or ID + in: path + name: nameOrId + required: true + schema: + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + responses: + "204": + content: + application/json: {} + description: Success + default: + description: "" + tags: + - Default Namespace + get: + description: Gets a token pool by its name or its ID + operationId: getTokenPoolByNameOrID + parameters: + - description: The token pool name or ID + in: path + name: nameOrId + required: true + schema: + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + responses: + "200": + content: + application/json: + schema: + properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean + connector: + description: The name of the token connector, as specified in + the FireFly core configuration file that is responsible for + the token pool. Required on input when multiple token connectors + are configured + type: string + created: + description: The creation time of the pool + format: date-time + type: string + decimals: + description: Number of decimal places that this token has + type: integer + id: + description: The UUID of the token pool + format: uuid + type: string + info: + additionalProperties: + description: Token connector specific information about the + pool. See your chosen token connector documentation for details + description: Token connector specific information about the pool. + See your chosen token connector documentation for details + type: object + interface: + description: A reference to an existing FFI, containing pre-registered + type information for the token contract + properties: + id: + description: The UUID of the FireFly interface + format: uuid + type: string + name: + description: The name of the FireFly interface + type: string + version: + description: The version of the FireFly interface + type: string + type: object + interfaceFormat: + description: The interface encoding format supported by the connector + for this token pool + enum: + - abi + - ffi + type: string + key: + description: The signing key used to create the token pool. On + input for token connectors that support on-chain deployment + of new tokens (vs. 
only index existing ones) this determines + the signing key used to create the token on-chain + type: string + locator: + description: A unique identifier for the pool, as provided by + the token connector + type: string + message: + description: The UUID of the broadcast message used to inform + the network about this pool + format: uuid + type: string + methods: + description: The method definitions resolved by the token connector + to be used by each token operation + name: + description: The name of the token pool. Note the name is not + validated against the description of the token on the blockchain + type: string + namespace: + description: The namespace for the token pool + type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean + standard: + description: The ERC standard the token pool conforms to, as reported + by the token connector + type: string + symbol: + description: The token symbol. If supplied on input for an existing + on-chain token, this must match the on-chain information + type: string + tx: + description: Reference to the FireFly transaction used to create + and broadcast this pool to the network + properties: + id: + description: The UUID of the FireFly transaction + format: uuid + type: string + type: + description: The type of the FireFly transaction + type: string + type: object + type: + description: The type of token the pool contains, such as fungible/non-fungible + enum: + - fungible + - nonfungible + type: string + type: object + description: Success + default: + description: "" + tags: + - Default Namespace + /tokens/pools/{nameOrId}/publish: + post: + description: Publish a token pool to all other members of the multiparty network + operationId: postTokenPoolPublish + parameters: + - description: The token pool name or ID + in: path + name: nameOrId + required: true + schema: + type: string + - description: When true the HTTP request blocks until the message is confirmed + in: query + name: confirm + schema: + type: string + - description: Server-side request timeout (milliseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 2m0s + type: string + requestBody: + content: + application/json: + schema: + properties: + networkName: + description: An optional name to be used for publishing this definition + to the multiparty network, which may differ from the local name + type: string + type: object + responses: + "200": + content: + application/json: + schema: + properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean connector: description: The name of the token connector, as specified in the FireFly core configuration file that is responsible for @@ -35753,7 +40851,7 @@ paths: type: string message: description: The UUID of the broadcast message used to inform - the network to index this pool + the network about this pool format: uuid type: string methods: @@ -35766,16 +40864,18 @@ paths: namespace: description: The namespace for the token pool type: string + networkName: + description: The published name of the token pool within the multiparty + network + type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean standard: description: The ERC 
standard the token pool conforms to, as reported by the token connector type: string - state: - description: The current state of the token pool - enum: - - pending - - confirmed - type: string symbol: description: The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information @@ -35805,6 +40905,10 @@ paths: application/json: schema: properties: + active: + description: Indicates whether the pool has been successfully + activated with the token connector + type: boolean connector: description: The name of the token connector, as specified in the FireFly core configuration file that is responsible for @@ -35863,7 +40967,7 @@ paths: type: string message: description: The UUID of the broadcast message used to inform - the network to index this pool + the network about this pool format: uuid type: string methods: @@ -35876,149 +40980,18 @@ paths: namespace: description: The namespace for the token pool type: string - standard: - description: The ERC standard the token pool conforms to, as reported - by the token connector - type: string - state: - description: The current state of the token pool - enum: - - pending - - confirmed - type: string - symbol: - description: The token symbol. If supplied on input for an existing - on-chain token, this must match the on-chain information - type: string - tx: - description: Reference to the FireFly transaction used to create - and broadcast this pool to the network - properties: - id: - description: The UUID of the FireFly transaction - format: uuid - type: string - type: - description: The type of the FireFly transaction - type: string - type: object - type: - description: The type of token the pool contains, such as fungible/non-fungible - enum: - - fungible - - nonfungible - type: string - type: object - description: Success - default: - description: "" - tags: - - Default Namespace - /tokens/pools/{nameOrId}: - get: - description: Gets a token pool by its name or its ID - operationId: getTokenPoolByNameOrID - parameters: - - description: The token pool name or ID - in: path - name: nameOrId - required: true - schema: - type: string - - description: Server-side request timeout (milliseconds, or set a custom suffix - like 10s) - in: header - name: Request-Timeout - schema: - default: 2m0s - type: string - responses: - "200": - content: - application/json: - schema: - properties: - connector: - description: The name of the token connector, as specified in - the FireFly core configuration file that is responsible for - the token pool. Required on input when multiple token connectors - are configured - type: string - created: - description: The creation time of the pool - format: date-time - type: string - decimals: - description: Number of decimal places that this token has - type: integer - id: - description: The UUID of the token pool - format: uuid - type: string - info: - additionalProperties: - description: Token connector specific information about the - pool. See your chosen token connector documentation for details - description: Token connector specific information about the pool. 
- See your chosen token connector documentation for details - type: object - interface: - description: A reference to an existing FFI, containing pre-registered - type information for the token contract - properties: - id: - description: The UUID of the FireFly interface - format: uuid - type: string - name: - description: The name of the FireFly interface - type: string - version: - description: The version of the FireFly interface - type: string - type: object - interfaceFormat: - description: The interface encoding format supported by the connector - for this token pool - enum: - - abi - - ffi - type: string - key: - description: The signing key used to create the token pool. On - input for token connectors that support on-chain deployment - of new tokens (vs. only index existing ones) this determines - the signing key used to create the token on-chain - type: string - locator: - description: A unique identifier for the pool, as provided by - the token connector - type: string - message: - description: The UUID of the broadcast message used to inform - the network to index this pool - format: uuid - type: string - methods: - description: The method definitions resolved by the token connector - to be used by each token operation - name: - description: The name of the token pool. Note the name is not - validated against the description of the token on the blockchain - type: string - namespace: - description: The namespace for the token pool + networkName: + description: The published name of the token pool within the multiparty + network type: string + published: + description: Indicates if the token pool is published to other + members of the multiparty network + type: boolean standard: description: The ERC standard the token pool conforms to, as reported by the token connector type: string - state: - description: The current state of the token pool - enum: - - pending - - confirmed - type: string symbol: description: The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information @@ -36337,14 +41310,153 @@ paths: the node type: string message: - description: The UUID of a message that has been correlated with - this transfer using the data field of the transfer in a compatible - token connector - format: uuid - type: string + description: You can specify a message to correlate with the transfer, + which can be of type broadcast or private. Your chosen token connector + and on-chain smart contract must support on-chain/off-chain correlation + by taking a `data` input on the transfer + properties: + data: + description: For input allows you to specify data in-line in + the message, that will be turned into data attachments. For + output when fetchdata is used on API calls, includes the in-line + data payloads of all data attachments + items: + description: For input allows you to specify data in-line + in the message, that will be turned into data attachments. + For output when fetchdata is used on API calls, includes + the in-line data payloads of all data attachments + properties: + datatype: + description: The optional datatype to use for validation + of the in-line data + properties: + name: + description: The name of the datatype + type: string + version: + description: The version of the datatype. 
Semantic + versioning is encouraged, such as v1.0.1 + type: string + type: object + id: + description: The UUID of the referenced data resource + format: uuid + type: string + validator: + description: The data validator type to use for in-line + data + type: string + value: + description: The in-line value for the data. Can be any + JSON type - object, array, string, number or boolean + type: object + type: array + group: + description: Allows you to specify details of the private group + of recipients in-line in the message. Alternative to using + the header.group to specify the hash of a group that has been + previously resolved + properties: + members: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + items: + description: An array of members of the group. If no identities + local to the sending node are included, then the organization + owner of the local node is added automatically + properties: + identity: + description: The DID of the group member. On input + can be a UUID or org name, and will be resolved + to a DID + type: string + node: + description: The UUID of the node that will receive + a copy of the off-chain message for the identity. + The first applicable node for the identity will + be picked automatically on input if not specified + type: string + type: object + type: array + name: + description: Optional name for the group. Allows you to + have multiple separate groups with the same list of participants + type: string + type: object + header: + description: The message header contains all fields that are + used to build the message hash + properties: + author: + description: The DID of identity of the submitter + type: string + cid: + description: The correlation ID of the message. Set this + when a message is a response to another message + format: uuid + type: string + group: + description: Private messages only - the identifier hash + of the privacy group. Derived from the name and member + list of the group + format: byte + type: string + key: + description: The on-chain signing key used to sign the transaction + type: string + tag: + description: The message tag indicates the purpose of the + message to the applications that process it + type: string + topics: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be assigned + - using the default topic is discouraged + items: + description: A message topic associates this message with + an ordered stream of data. A custom topic should be + assigned - using the default topic is discouraged + type: string + type: array + txtype: + description: The type of transaction used to order/deliver + this message + enum: + - none + - unpinned + - batch_pin + - network_action + - token_pool + - token_transfer + - contract_deploy + - contract_invoke + - contract_invoke_pin + - token_approval + - data_publish + type: string + type: + description: The type of the message + enum: + - definition + - broadcast + - private + - groupinit + - transfer_broadcast + - transfer_private + - approval_broadcast + - approval_private + type: string + type: object + idempotencyKey: + description: An optional unique identifier for a message. Cannot + be duplicated within a namespace, thus allowing idempotent + submission of messages to the API. 
Local only - not transferred + when the message is sent to other members of the network + type: string + type: object pool: - description: The UUID the token pool this transfer applies to - format: uuid + description: The name or UUID of a token pool type: string to: description: The target account for the transfer. On input defaults @@ -37308,6 +42420,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -37369,6 +42482,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -37403,6 +42517,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -37421,6 +42536,7 @@ paths: description: The type of the verifier enum: - ethereum_address + - tezos_address - fabric_msp_id - dx_peer_id type: string @@ -37588,4 +42704,4 @@ paths: tags: - Global servers: -- url: http://localhost:5000 +- url: http://localhost:5000/api/v1 diff --git a/docs/tutorials/chains/images/tezos_explorer.png b/docs/tutorials/chains/images/tezos_explorer.png new file mode 100644 index 0000000000..ec418f0066 Binary files /dev/null and b/docs/tutorials/chains/images/tezos_explorer.png differ diff --git a/docs/tutorials/chains/images/tezos_faucet.png b/docs/tutorials/chains/images/tezos_faucet.png new file mode 100644 index 0000000000..99eed33154 Binary files /dev/null and b/docs/tutorials/chains/images/tezos_faucet.png differ diff --git a/docs/tutorials/chains/tezos_testnet.md b/docs/tutorials/chains/tezos_testnet.md new file mode 100644 index 0000000000..02ae39f9ce --- /dev/null +++ b/docs/tutorials/chains/tezos_testnet.md @@ -0,0 +1,101 @@ +--- +layout: i18n_page +title: pages.tezos_testnet +parent: pages.chains +grand_parent: pages.tutorials +nav_order: 6 +--- + +# {%t pages.tezos_testnet %} +{: .no_toc } + +1. TOC +{:toc} + +--- + +This guide will walk you through the steps to create a local FireFly development environment and connect it to the public Tezos Ghostnet testnet. + +## Previous steps: Install the FireFly CLI + +If you haven't set up the FireFly CLI already, please go back to the Getting Started guide and read the section on how to [Install the FireFly CLI](../../gettingstarted/firefly_cli.md). + +[← ① Install the FireFly CLI](../../gettingstarted/firefly_cli.md){: .btn .btn-purple .mb-5} + +## Set up the transaction signing service + +[Signatory](https://signatory.io/) service allows to work with many different key-management systems.\ +By default, FF uses [local signing](https://signatory.io/docs/file_based) option.\ +However, it is also possible to configure the transaction signing service using key management systems such as: AWS/Google/Azure KMS, HCP Vault, etc. +> **NOTE**: The default option is not secure and is mainly used for development and demo purposes. Therefore, for the production, use the selected KMS.\ +The full list can be found [here](https://github.com/ecadlabs/signatory#backend-kmshsm-support-status). + +## Creating a new stack + +To create a local FireFly development stack and connect it to the Tezos Ghostnet testnet, we will use command line flags to customize the following settings: + +- Create a new Tezos based stack named `tezos` with `1` member +- Disable `multiparty` mode. 
We are going to be using this FireFly node as a Web3 gateway, and we don't need to communicate with a consortium here +- See the list of Tezos [public RPC nodes](https://tezostaquito.io/docs/rpc_nodes/) and select an HTTPS RPC node. + +To do this, run the following command: + +``` +ff init tezos dev 1 \ + --multiparty=false \ + --remote-node-url +``` + +> **NOTE**: The public RPC nodes may have limitations or may not support all of the RPC endpoints FireFly requires. Therefore they are not recommended for production use; you may need to run your own node or use a third-party vendor. + +## Start the stack + +Now you should be able to start your stack by running: + +``` +ff start dev +``` + +After some time it should print out the following: + +``` +Web UI for member '0': http://127.0.0.1:5000/ui +Sandbox UI for member '0': http://127.0.0.1:5109 + + +To see logs for your stack run: + +ff logs dev +``` + +## Get some XTZ + +At this point you should have a working FireFly stack, talking to a public chain. However, you won't be able to run any transactions just yet, because you don't have any way to pay transaction fees. A testnet faucet can give us some XTZ, the native token for Tezos. + +First, you need to get the account address that was created during the [signer set up](#set-up-the-transaction-signing-service) step.\ +To check that, you can run: +``` +ff accounts list dev +[ + { + "address": "tz1cuFw1E2Mn2bVS8q8d7QoCb6FXC18JivSp", + "privateKey": "..." + } +] +``` + + +After that, go to the [Tezos Ghostnet Faucet](https://faucet.ghostnet.teztnets.xyz/), paste the address into the form, and click the **Request** button. + +![Tezos Faucet](images/tezos_faucet.png) + +### Confirm the transaction on TzStats +You should be able to look up your account on [TzStats for the Ghostnet testnet](https://ghost.tzstats.com/) and see that you now have a balance of 100 XTZ (or 2001 XTZ, depending on the faucet option you selected). Simply paste in your account address to search for it. + +On the **Transfers** tab of your account page you will see the actual transfer of the XTZ from the faucet. + +![TzStats](images/tezos_explorer.png) + +## Use the public testnet + +Now that you have everything set up, you can follow one of the other FireFly guides such as [Custom Smart Contracts](../custom_contracts/tezos.md). For detailed instructions on deploying a custom smart contract to Tezos, please see the [Tezos docs](https://docs.tezos.com/smart-contracts/deploying), which cover various tools. \ No newline at end of file diff --git a/docs/tutorials/custom_contracts/ethereum.md b/docs/tutorials/custom_contracts/ethereum.md index cd58e666f2..bcf79993cc 100644 --- a/docs/tutorials/custom_contracts/ethereum.md +++ b/docs/tutorials/custom_contracts/ethereum.md @@ -14,11 +14,10 @@ This guide describes the steps to deploy a smart contract to an Ethereum blockch > **NOTE:** This guide assumes that you are running a local FireFly stack with at least 2 members and an Ethereum blockchain created by the FireFly CLI. If you need help getting that set up, please see the [Getting Started guide to Start your environment](../../gettingstarted/setup_env.html). ## Table of contents - {: .no_toc .text-delta } 1.
TOC - {:toc} +{:toc} --- diff --git a/docs/tutorials/custom_contracts/fabric.md b/docs/tutorials/custom_contracts/fabric.md index cccd4ce97e..e7f2df33b4 100644 --- a/docs/tutorials/custom_contracts/fabric.md +++ b/docs/tutorials/custom_contracts/fabric.md @@ -15,11 +15,10 @@ This guide describes the steps to deploy a chaincode to a Hyperledger Fabric blo > **NOTE:** This guide assumes that you are running a local FireFly stack with at least 2 members and a Fabric blockchain created by the FireFly CLI. If you need help getting that set up, please see the [Getting Started guide to Start your environment](../../gettingstarted/setup_env.html). ## Table of contents - {: .no_toc .text-delta } 1. TOC - {:toc} +{:toc} --- diff --git a/docs/tutorials/custom_contracts/images/simple_storage_swagger.png b/docs/tutorials/custom_contracts/images/simple_storage_swagger.png new file mode 100644 index 0000000000..1d49237f26 Binary files /dev/null and b/docs/tutorials/custom_contracts/images/simple_storage_swagger.png differ diff --git a/docs/tutorials/custom_contracts/images/tezos_contract_deployment.png b/docs/tutorials/custom_contracts/images/tezos_contract_deployment.png new file mode 100644 index 0000000000..5f17421e4c Binary files /dev/null and b/docs/tutorials/custom_contracts/images/tezos_contract_deployment.png differ diff --git a/docs/tutorials/custom_contracts/images/tezos_contract_deployment2.png b/docs/tutorials/custom_contracts/images/tezos_contract_deployment2.png new file mode 100644 index 0000000000..8b84d17946 Binary files /dev/null and b/docs/tutorials/custom_contracts/images/tezos_contract_deployment2.png differ diff --git a/docs/tutorials/custom_contracts/pinning.md b/docs/tutorials/custom_contracts/pinning.md index 187ad7c4b3..2f263e5532 100644 --- a/docs/tutorials/custom_contracts/pinning.md +++ b/docs/tutorials/custom_contracts/pinning.md @@ -3,7 +3,7 @@ layout: default title: Pinning Data parent: pages.custom_smart_contracts grand_parent: pages.tutorials -nav_order: 3 +nav_order: 4 --- # Pin off-chain data to a custom blockchain transaction @@ -18,11 +18,10 @@ This guide describes how to associate an arbitrary off-chain payload with a bloc > fundamentals of how FireFly interacts with [custom contracts](../). ## Table of contents - {: .no_toc .text-delta } 1. TOC - {:toc} +{:toc} --- diff --git a/docs/tutorials/custom_contracts/tezos.md b/docs/tutorials/custom_contracts/tezos.md new file mode 100644 index 0000000000..eb5a297c33 --- /dev/null +++ b/docs/tutorials/custom_contracts/tezos.md @@ -0,0 +1,539 @@ +--- +layout: default +title: Tezos +parent: pages.custom_smart_contracts +grand_parent: pages.tutorials +nav_order: 3 +--- + +# Work with Tezos smart contracts + +{: .no_toc } +This guide describes the steps to deploy a smart contract to a Tezos blockchain and use FireFly to interact with it in order to submit transactions, query state, and listen for events. + +## Table of contents +{: .no_toc .text-delta } + +1. TOC +{:toc} + +--- + +## Smart Contract Languages + +Smart contracts on Tezos can be programmed using familiar, developer-friendly languages. Anything you can do on Tezos can be written in any of the high-level languages used to write smart contracts, such as Archetype, LIGO, and SmartPy. These languages all compile down to [Michelson](https://tezos.gitlab.io/active/michelson.html) and you can switch between languages based on your preferences and projects.
+ +> **NOTE:** For this tutorial we are going to use [SmartPy](https://smartpy.io/) to build Tezos smart contracts, taking advantage of the broadly adopted Python language. + +## Example smart contract + +First let's look at a simple smart contract called `SimpleStorage`, which we will be using on a Tezos blockchain. Here we have a single storage variable (`x` in the code below), initialized with the value 12; its type, 'int', is determined during initialization. You can see more at [SmartPy types](https://smartpy.io/manual/syntax/integers-and-mutez). We also add a simple test, which sets the storage value to 15 and checks that the value was changed as expected. + +> **NOTE:** The smart contract's tests (marked with the `@sp.add_test` annotation) are used to verify the validity of contract entrypoints and do not affect the state of the contract during deployment. + +Here is the source for this contract: + +```smarty +import smartpy as sp + +@sp.module +def main(): + # Declares a new contract + class SimpleStorage(sp.Contract): + # Storage. Persists in between transactions + def __init__(self, value): + self.data.x = value + + # Allows the stored integer to be changed + @sp.entrypoint + def set(self, params): + self.data.x = params.value + + # Returns the currently stored integer + @sp.onchain_view() + def get(self): + return self.data.x + +@sp.add_test(name="SimpleStorage") +def test(): + # Initialize the contract + c = main.SimpleStorage(12) + + # Create a test scenario and run some test cases + scenario = sp.test_scenario(main) + scenario.h1("SimpleStorage") + scenario += c + c.set(value=15) + scenario.verify(c.data.x == 15) + scenario.verify(scenario.compute(c.get()) == 15) +``` + +## Contract deployment + +To deploy the contract, we will use the [SmartPy IDE](https://smartpy.io/ide): +1. Open the IDE; +2. Paste the contract code; +3. Click the "Run code" button; +4. When the "Deploy Michelson Contract" button appears, click on it; +5. Choose the Ghostnet network; +6. Select the account you're going to use to deploy the contract; +7. Click the "Estimate Cost From RPC" button; +8. Click the "Deploy Contract" button. + +![ContractDeployment](images/tezos_contract_deployment.png) +![ContractDeployment2](images/tezos_contract_deployment2.png) + +Here we can see that our new contract address is `KT1ED4gj2xZnp8318yxa5NpvyvW15pqe4yFg`. This is the address that we will reference in the rest of this guide. + +## The FireFly Interface Format + +As we know from the previous section, smart contracts on the Tezos blockchain use the domain-specific, stack-based programming language called [Michelson](https://tezos.gitlab.io/active/michelson.html). It is a key component of the Tezos platform and plays a fundamental role in defining the behavior of smart contracts and facilitating their execution. +This language is very efficient, but also tricky and challenging to learn, so in order to teach FireFly how to interact with the smart contract, we will use the [FireFly Interface (FFI)](../../reference/firefly_interface_format.md) format to define the contract interface, which will later be encoded to Michelson.
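As a small, purely illustrative preview of that mapping, the sketch below shows how the single integer parameter of our `SimpleStorage` contract's `set` entrypoint can be described in FFI terms (held in a Python dict here for convenience); the complete `simplestorage` FFI document is shown in the broadcast step later in this guide.

```python
# Illustrative only: an FFI description of SimpleStorage's "set" entrypoint.
# The "schema" section uses JSON Schema types, while "details" carries the
# connector-specific type information used for the Michelson encoding.
set_method = {
    "name": "set",
    "pathname": "",
    "description": "",
    "params": [
        {
            "name": "newValue",
            "schema": {
                "type": "integer",
                "details": {"type": "integer", "internalType": "integer"},
            },
        }
    ],
    "returns": [],
}

print(set_method["params"][0]["schema"])
```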
+ +The following FFI sample demonstrates the specification for the widely used FA2 (analogue of ERC721 for EVM) smart contract: + +```json +{ + "namespace": "default", + "name": "fa2", + "version": "v1.0.0", + "description": "", + "methods": [ + { + "name": "burn", + "pathname": "", + "description": "", + "params": [ + { + "name": "token_ids", + "schema": { + "type": "array", + "details": { + "type": "nat", + "internalType": "nat" + } + } + } + ], + "returns": [] + }, + { + "name": "destroy", + "pathname": "", + "description": "", + "params": [], + "returns": [] + }, + { + "name": "mint", + "pathname": "", + "description": "", + "params": [ + { + "name": "owner", + "schema": { + "type": "string", + "details": { + "type": "address", + "internalType": "address" + } + } + }, + { + "name": "requests", + "schema": { + "type": "array", + "details": { + "type": "schema", + "internalSchema": { + "type": "struct", + "args": [ + { + "name": "metadata", + "type": "bytes" + }, + { + "name": "token_id", + "type": "nat" + } + ] + } + } + } + } + ], + "returns": [] + }, + { + "name": "pause", + "pathname": "", + "description": "", + "params": [ + { + "name": "pause", + "schema": { + "type": "boolean", + "details": { + "type": "boolean", + "internalType": "boolean" + } + } + } + ], + "returns": [] + }, + { + "name": "select", + "pathname": "", + "description": "", + "params": [ + { + "name": "batch", + "schema": { + "type": "array", + "details": { + "type": "schema", + "internalSchema": { + "type": "struct", + "args": [ + { + "name": "token_id", + "type": "nat" + }, + { + "name": "recipient", + "type": "address" + }, + { + "name": "token_id_start", + "type": "nat" + }, + { + "name": "token_id_end", + "type": "nat" + } + ] + } + } + } + } + ], + "returns": [] + }, + { + "name": "transfer", + "pathname": "", + "description": "", + "params": [ + { + "name": "batch", + "schema": { + "type": "array", + "details": { + "type": "schema", + "internalSchema": { + "type": "struct", + "args": [ + { + "name": "from_", + "type": "address" + }, + { + "name": "txs", + "type": "list", + "args": [ + { + "type": "struct", + "args": [ + { + "name": "to_", + "type": "address" + }, + { + "name": "token_id", + "type": "nat" + }, + { + "name": "amount", + "type": "nat" + } + ] + } + ] + } + ] + } + } + } + } + ], + "returns": [] + }, + { + "name": "update_admin", + "pathname": "", + "description": "", + "params": [ + { + "name": "admin", + "schema": { + "type": "string", + "details": { + "type": "address", + "internalType": "address" + } + } + } + ], + "returns": [] + }, + { + "name": "update_operators", + "pathname": "", + "description": "", + "params": [ + { + "name": "requests", + "schema": { + "type": "array", + "details": { + "type": "schema", + "internalSchema": { + "type": "variant", + "variants": [ + "add_operator", + "remove_operator" + ], + "args": [ + { + "type": "struct", + "args": [ + { + "name": "owner", + "type": "address" + }, + { + "name": "operator", + "type": "address" + }, + { + "name": "token_id", + "type": "nat" + } + ] + } + ] + } + } + } + } + ], + "returns": [] + } + ], + "events": [] +} +``` + + +## Broadcast the contract interface + +Now that we have a FireFly Interface representation of our smart contract, we want to broadcast that to the entire network. This broadcast will be pinned to the blockchain, so we can always refer to this specific name and version, and everyone in the network will know exactly which contract interface we are talking about. 
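If you prefer to script this step rather than craft the HTTP call by hand (or use the Sandbox described below), a minimal sketch along the following lines works against the same endpoint. It assumes FireFly is reachable at `http://localhost:5000` as elsewhere in this guide, that the Python `requests` package is installed, and that the FFI JSON shown in the next request has been saved to a local file named `simplestorage_ffi.json` (the file name is just an example).

```python
# Minimal sketch: broadcast a FireFly Interface (FFI) document via the REST API.
import json

import requests

FIREFLY = "http://localhost:5000/api/v1/namespaces/default"

# Load the FFI JSON shown in the request below (example file name)
with open("simplestorage_ffi.json") as f:
    ffi = json.load(f)

# POST the FFI document to register the contract interface with FireFly
resp = requests.post(f"{FIREFLY}/contracts/interfaces", json=ffi, timeout=30)
resp.raise_for_status()

interface = resp.json()
print("Interface ID:", interface["id"])  # needed later to create the HTTP API
```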
+ +We will use the FFI JSON constructed above and `POST` that to the `/contracts/interfaces` API endpoint. + +### Request + +`POST` `http://localhost:5000/api/v1/namespaces/default/contracts/interfaces` + +```json +{ + "namespace": "default", + "name": "simplestorage", + "version": "v1.0.0", + "description": "", + "methods": [ + { + "name": "set", + "pathname": "", + "description": "", + "params": [ + { + "name": "newValue", + "schema": { + "type": "integer", + "details": { + "type": "integer", + "internalType": "integer" + } + } + } + ], + "returns": [] + }, + { + "name": "get", + "pathname": "", + "description": "", + "params": [], + "returns": [] + } + ], + "events": [] +} +``` + +### Response + +```json +{ + "id": "f9e34787-e634-46cd-af47-b52c537404ff", + "namespace": "default", + "name": "simplestorage", + "description": "", + "version": "v1.0.0", + "methods": [ + { + "id": "78f13a7f-7b85-47c3-bf51-346a9858c027", + "interface": "f9e34787-e634-46cd-af47-b52c537404ff", + "name": "set", + "namespace": "default", + "pathname": "set", + "description": "", + "params": [ + { + "name": "newValue", + "schema": { + "type": "integer", + "details": { + "type": "integer", + "internalType": "integer" + } + } + } + ], + "returns": [] + }, + { + "id": "ee864e25-c3f7-42d3-aefd-a82f753e9002", + "interface": "f9e34787-e634-46cd-af47-b52c537404ff", + "name": "get", + "namespace": "tezos", + "pathname": "get", + "description": "", + "params": [], + "returns": [] + } + ] +} +``` + +> **NOTE**: We can broadcast this contract interface conveniently with the help of FireFly Sandbox running at `http://127.0.0.1:5108` +* Go to the `Contracts Section` +* Click on `Define a Contract Interface` +* Select `FFI - FireFly Interface` in the `Interface Fromat` dropdown +* Copy the `FFI JSON` crafted by you into the `Schema` Field +* Click on `Run` + +## Create an HTTP API for the contract + +Now comes the fun part where we see some of the powerful, developer-friendly features of FireFly. The next thing we're going to do is tell FireFly to build an HTTP API for this smart contract, complete with an OpenAPI Specification and Swagger UI. As part of this, we'll also tell FireFly where the contract is on the blockchain. + +Like the interface broadcast above, this will also generate a broadcast which will be pinned to the blockchain so all the members of the network will be aware of and able to interact with this API. + +We need to copy the `id` field we got in the response from the previous step to the `interface.id` field in the request body below. We will also pick a name that will be part of the URL for our HTTP API, so be sure to pick a name that is URL friendly. In this case we'll call it `simple-storage`. Lastly, in the `location.address` field, we're telling FireFly where an instance of the contract is deployed on-chain. + +> **NOTE**: The `location` field is optional here, but if it is omitted, it will be required in every request to invoke or query the contract. This can be useful if you have multiple instances of the same contract deployed to different addresses. 
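To illustrate that note: if you do omit `location` when registering the API, each invoke or query request has to say which on-chain instance it is targeting. The sketch below is a hedged example of what that could look like, reusing the `simple-storage` API and `invoke/set` endpoint that are set up in the next steps; check the Swagger UI generated for your stack to confirm the exact request schema supported by your FireFly version.

```python
# Hedged sketch: calling an API that was registered without a fixed "location",
# so the contract address is supplied on every request.
import requests

API = "http://localhost:5000/api/v1/namespaces/default/apis/simple-storage"

body = {
    # Which deployed instance of the contract to target (per-request override)
    "location": {"address": "KT1ED4gj2xZnp8318yxa5NpvyvW15pqe4yFg"},
    # Method input, exactly as in the invoke example later in this guide
    "input": {"newValue": 3},
}

resp = requests.post(f"{API}/invoke/set", json=body, timeout=30)
print(resp.status_code, resp.json())
```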
+ +### Request + +`POST` `http://localhost:5000/api/v1/namespaces/default/apis` + +```json +{ + "name": "simple-storage", + "interface": { + "id": "f9e34787-e634-46cd-af47-b52c537404ff" + }, + "location": { + "address": "KT1ED4gj2xZnp8318yxa5NpvyvW15pqe4yFg" + } +} +``` + +### Response + +```json +{ + "id": "af09de97-741d-4f61-8d30-4db5e7460f76", + "namespace": "default", + "interface": { + "id": "f9e34787-e634-46cd-af47-b52c537404ff" + }, + "location": { + "address": "KT1ED4gj2xZnp8318yxa5NpvyvW15pqe4yFg" + }, + "name": "simple-storage", + "urls": { + "openapi": "http://127.0.0.1:5000/api/v1/namespaces/default/apis/simple-storage/api/swagger.json", + "ui": "http://127.0.0.1:5000/api/v1/namespaces/default/apis/simple-storage/api" + } +} +``` + +## View OpenAPI spec for the contract + +You'll notice in the response body that there are a couple of URLs near the bottom. If you navigate to the one labeled `ui` in your browser, you should see the Swagger UI for your smart contract. + +![Swagger UI](images/simple_storage_swagger.png "Swagger UI") + +## Invoke the smart contract + +Now that we've got everything set up, it's time to use our smart contract! We're going to make a `POST` request to the `invoke/set` endpoint to set the integer value on-chain. Let's set it to the value of `3` right now. + +### Request + +`POST` `http://localhost:5000/api/v1/namespaces/default/apis/simple-storage/invoke/set` + +```json +{ + "input": { + "newValue": 3 + } +} +``` + +### Response + +```json +{ + "id": "87c7ee1b-33d1-46e2-b3f5-8566c14367cf", + "type": "blockchain_invoke", + "status": "Pending", + "..." +} +``` + +You'll notice that we got an ID back with status `Pending`, and that's expected due to the asynchronous programming model of working with smart contracts in FireFly. To see what the value is now, we can query the smart contract. + +## Query the current value + +To make a read-only request to the blockchain to check the current value of the stored integer, we can make a `POST` to the `query/get` endpoint. + +### Request + +`POST` `http://localhost:5000/api/v1/namespaces/default/apis/simple-storage/query/get` + +```json +{} +``` + +### Response + +```json +{ + "3" +} +``` + +> **NOTE:** Some contracts may have queries that require input parameters. That's why the query endpoint is a `POST`, rather than a `GET` so that parameters can be passed as JSON in the request body. This particular function does not have any parameters, so we just pass an empty JSON object. 
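To tie the last two steps together, here is a minimal end-to-end sketch that submits the `set` transaction, polls the returned operation until it is no longer `Pending`, and then queries the stored value. The operation-status route (`/operations/{id}`) is a standard FireFly endpoint, but you can confirm the exact paths and response shapes in the Swagger UI generated for your stack.

```python
# Minimal sketch: invoke "set", wait for the asynchronous operation to finish,
# then query "get" on the simple-storage API created earlier in this guide.
import time

import requests

NS = "http://localhost:5000/api/v1/namespaces/default"
API = f"{NS}/apis/simple-storage"

op = requests.post(f"{API}/invoke/set", json={"input": {"newValue": 3}}, timeout=30).json()
print("operation:", op["id"], "status:", op["status"])

# Blockchain calls are processed asynchronously, so poll the operation status
while op["status"] == "Pending":
    time.sleep(2)
    op = requests.get(f"{NS}/operations/{op['id']}", timeout=30).json()
print("final status:", op["status"])

# Queries are POSTs so that parameters can be passed in the body; "get" takes none
value = requests.post(f"{API}/query/get", json={}, timeout=30).json()
print("stored value:", value)
```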
\ No newline at end of file diff --git a/go.mod b/go.mod index 57937d1685..1e78c3afa1 100644 --- a/go.mod +++ b/go.mod @@ -1,35 +1,35 @@ module github.com/hyperledger/firefly -go 1.18 +go 1.21 require ( + blockwatch.cc/tzgo v1.17.1 github.com/DATA-DOG/go-sqlmock v1.5.0 - github.com/Masterminds/squirrel v1.5.3 + github.com/Masterminds/squirrel v1.5.4 github.com/aidarkhanov/nanoid v1.0.8 github.com/blang/semver/v4 v4.0.0 github.com/docker/go-units v0.5.0 - github.com/getkin/kin-openapi v0.112.0 + github.com/getkin/kin-openapi v0.116.0 github.com/ghodss/yaml v1.0.0 github.com/go-resty/resty/v2 v2.7.0 - github.com/golang-migrate/migrate/v4 v4.15.2 + github.com/golang-migrate/migrate/v4 v4.16.1 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 - github.com/hyperledger/firefly-common v1.2.10 - github.com/hyperledger/firefly-signer v1.1.5 + github.com/hyperledger/firefly-common v1.4.1 + github.com/hyperledger/firefly-signer v1.1.8 github.com/jarcoal/httpmock v1.2.0 - github.com/karlseguin/ccache v2.0.3+incompatible - github.com/lib/pq v1.10.7 - github.com/mattn/go-sqlite3 v1.14.16 + github.com/lib/pq v1.10.9 + github.com/mattn/go-sqlite3 v1.14.19 github.com/prometheus/client_golang v1.14.0 github.com/qeesung/image2ascii v1.0.1 github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 - github.com/sirupsen/logrus v1.9.0 + github.com/sirupsen/logrus v1.9.2 github.com/spf13/cobra v1.6.1 github.com/spf13/viper v1.14.0 github.com/stretchr/testify v1.8.1 - gitlab.com/hfuss/mux-prometheus v0.0.4 - golang.org/x/net v0.8.0 - golang.org/x/text v0.8.0 + gitlab.com/hfuss/mux-prometheus v0.0.5 + golang.org/x/net v0.17.0 + golang.org/x/text v0.14.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -38,6 +38,9 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/echa/log v1.2.4 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/swag v0.22.3 // indirect @@ -49,12 +52,13 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/yaml v0.2.0 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/karlseguin/ccache v2.0.3+incompatible // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -62,6 +66,7 @@ require ( github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/perimeterx/marshmallow v1.1.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect @@ -75,14 +80,14 @@ require ( github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect github.com/wayneashleyberry/terminal-dimensions v1.1.0 // indirect - 
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect github.com/x-cray/logrus-prefixed-formatter v0.5.2 // indirect go.uber.org/atomic v1.10.0 // indirect - golang.org/x/crypto v0.4.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index a484b3a9cc..725f9cfea4 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +blockwatch.cc/tzgo v1.17.1 h1:00xwa5MS8DAO6ddtTRAw/VdfEdGZHgUadjtOeFDLgjY= +blockwatch.cc/tzgo v1.17.1/go.mod h1:tTgPzOH1pMhQod2sh2/jjOLabdCQegb8FZG23+fv1XE= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -19,18 +19,6 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -39,12 +27,10 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod 
h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/spanner v1.28.0/go.mod h1:7m6mtQZn/hMbMfx62ct5EWrGND4DNqkXyrmBPRS+OJo= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -52,488 +38,92 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= 
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc= -github.com/Masterminds/squirrel v1.5.3/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= -github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= -github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= -github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 
-github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/aidarkhanov/nanoid v1.0.8 h1:yxyJkgsEDFXP7+97vc6JevMcjyb03Zw+/9fqhlVXBXA= github.com/aidarkhanov/nanoid v1.0.8/go.mod h1:vadfZHT+m4uDhttg0yY4wW3GKtl2T6i4d2Age+45pYk= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= -github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= -github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2/config 
v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= -github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= -github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= -github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= -github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod 
h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= 
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= -github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= -github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= -github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod 
h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= -github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o= -github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod 
h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= -github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc 
v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= -github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= -github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8= -github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution 
v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.13+incompatible h1:5s7uxnKZG+b8hYWlPYUi6x1Sjpq2MSt96d15eLZeHyw= -github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/dhui/dktest v0.3.16 h1:i6gq2YQEtcrjKbeJpBkWjE8MmLZPYllcjOFbTZuPDnw= +github.com/dhui/dktest v0.3.16/go.mod h1:gYaA3LRmM8Z4vJl2MA0THIigJoZrwOansEOsp+kqxp0= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= +github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= -github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/echa/bson v0.0.0-20220430141917-c0fbdf7f8b79 h1:J+/tX7s5mN1aoeQi2ySzix7+zyEhnymkudOxn7VMze4= +github.com/echa/bson v0.0.0-20220430141917-c0fbdf7f8b79/go.mod h1:Ih8Pfj34Z/kOmaLua+KtFWFK3AviGsH5siipj6Gmoa8= +github.com/echa/log v1.2.4 h1:+3+WEqutIBUbASYnuk9zz6HKlm6o8WsFxlOMbA3BcAA= +github.com/echa/log v1.2.4/go.mod 
h1:KYs5YtFCgL4yHBBqhPmTBhz5ETI1A8q+qbiDPPF1MiM= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= -github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= 
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/getkin/kin-openapi v0.112.0 h1:lnLXx3bAG53EJVI4E/w0N8i1Y/vUZUEsnrXkgnfn7/Y= -github.com/getkin/kin-openapi v0.112.0/go.mod h1:QtwUNt0PAAgIIBEvFWYfB7dfngxtAaqCX1zYHMZDeK8= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/getkin/kin-openapi v0.116.0 h1:o986hwgMzR972JzOG5j6+WTwWqllZLs1EJKMKCivs2E= +github.com/getkin/kin-openapi v0.116.0/go.mod h1:l5e9PaFUo9fyLJCPGQeXI2ML8c3P8BHOEV2VaAVf/pc= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference 
v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= 
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= -github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= -github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc= -github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang-migrate/migrate/v4 v4.16.1 h1:O+0C55RbMN66pWm5MjO6mw0px6usGpY0+bkSGW9zCo0= +github.com/golang-migrate/migrate/v4 v4.16.1/go.mod h1:qXiwa/3Zeqaltm1MxOCZDYysW/F6folYiBgBG03l9hc= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache 
v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -541,9 +131,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -559,17 +146,10 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -578,22 +158,13 @@ github.com/google/go-cmp v0.4.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -604,707 +175,207 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap 
v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hyperledger/firefly-common v1.2.10 h1:im+GPaTnv764WmNvmtFFXz7+wx2IXg58Pf8AtDapM5o= -github.com/hyperledger/firefly-common v1.2.10/go.mod h1:kSAawo5is7RKWh3e9PMdWuR/OitfHAaAWVSK6V4lX80= -github.com/hyperledger/firefly-signer v1.1.5 h1:kCHe4O3tmYaMYf1hXrG40sIlpiGfOICmNNAcuMH5+jQ= -github.com/hyperledger/firefly-signer v1.1.5/go.mod h1:XZP8ZXUZJSAPB6WUSM9RqRdpjnjm6Jsn0784WM4IKug= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/hyperledger/firefly-common v1.4.1 h1:ZhEsEQonzOHIUnHeEm+z8hq5BIQIVy1c41kVTjhblRM= +github.com/hyperledger/firefly-common v1.4.1/go.mod h1:wbhklt6aNEs3cEQjNEf3NZXOnB5pz0f2gxRh6S9rBmg= +github.com/hyperledger/firefly-signer v1.1.8 h1:XyJjZXesih2dWYG31m5ZYt4irH7/PdkRutMPld7AqKE= +github.com/hyperledger/firefly-signer v1.1.8/go.mod h1:vNbbROziwqkOmO0b+9ky3devjcFg0JIkR2M1KG7seTQ= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= -github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= -github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod 
h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v1.2.0 h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc= github.com/jarcoal/httpmock v1.2.0/go.mod h1:oCoTsnAz4+UoOUIf5lJOWV2QQIW5UoeUI6aM2YnWAZk= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= -github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod 
h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU= github.com/karlseguin/ccache v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w= github.com/karlseguin/expect v1.0.8 h1:Bb0H6IgBWQpadY25UDNkYPDB9ITqK1xnSoZfAq362fw= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/karlseguin/expect v1.0.8/go.mod h1:lXdI8iGiQhmzpnnmU/EGA60vqKs8NbRNFnhhrJGoD5g= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= 
-github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= -github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= -github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= 
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= +github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxatome/go-testdeep v1.11.0 h1:Tgh5efyCYyJFGUYiT0qxBSIDeXw0F5zSoatlou685kk= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/maxatome/go-testdeep v1.11.0/go.mod h1:011SgQ6efzZYAen6fDn4BqQ+lUR72ysdyKe7Dyogw70= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= -github.com/moby/term 
v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= -github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod 
h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc 
v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod 
h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/perimeterx/marshmallow v1.1.4 h1:pZLDH9RjlLGGorbXhcaQLhfuV0pFMNfPO55FuFkxqLw= +github.com/perimeterx/marshmallow v1.1.4/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= 
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/qeesung/image2ascii v1.0.1 h1:Fe5zTnX/v/qNC3OC4P/cfASOXS501Xyw2UUcgrLgtp4= github.com/qeesung/image2ascii v1.0.1/go.mod h1:kZKhyX0h2g/YXa/zdJR3JnLnJ8avHjZ3LrvEKSYyAyU= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 h1:lEOLY2vyGIqKWUI9nzsOJRV3mb3WC9dXYORsLEUcoeY= github.com/santhosh-tekuri/jsonschema/v5 v5.1.1/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= -github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= +github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns 
v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/wayneashleyberry/terminal-dimensions v1.1.0 h1:EB7cIzBdsOzAgmhTUtTTQXBByuPheP/Zv1zL2BRPY6g= github.com/wayneashleyberry/terminal-dimensions v1.1.0/go.mod h1:2lc/0eWCObmhRczn2SdGSQtgBooLUzIotkkEGXqghyg= -github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -gitlab.com/hfuss/mux-prometheus v0.0.4 h1:MbH7QtvPp9CVInvbQtuTC56eMvAguQK2zgVoKRcJ5l0= -gitlab.com/hfuss/mux-prometheus v0.0.4/go.mod 
h1:4dALqvZzJisEAII64a6zhtdDEfvs+BjemTynBDWuRK0= -gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= -go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +gitlab.com/hfuss/mux-prometheus v0.0.5 h1:Kcqyiekx8W2dO1EHg+6wOL1F0cFNgRO1uCK18V31D0s= +gitlab.com/hfuss/mux-prometheus v0.0.5/go.mod h1:xcedy8rVGr9TFgRu2urfGuh99B4NdfYdpE4aUMQ0dxA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= 
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= -golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image 
v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1316,7 +387,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1327,36 +397,20 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1367,40 +421,19 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1409,77 +442,32 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1487,135 +475,56 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text 
v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1635,44 +544,25 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools 
v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1692,34 +582,18 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod 
h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= -google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1728,7 +602,6 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto 
v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1737,64 +610,25 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1804,24 +638,9 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1834,59 +653,28 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 
v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg= -gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1896,85 +684,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= -k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= -k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= -k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= -k8s.io/cri-api v0.20.1/go.mod 
h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= -k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= -modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= -modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= -modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= -modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= -modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= -modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= -modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= -modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= -modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= -modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs= -modernc.org/strutil v1.1.0/go.mod 
h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= -modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= -modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/go.work b/go.work index 890eaec528..28b0fad5d2 100644 --- a/go.work +++ b/go.work @@ -1,7 +1,7 @@ -go 1.18 +go 1.21 use ( . 
- ./smart_contracts/fabric/firefly-go ./smart_contracts/fabric/custompin-sample + ./smart_contracts/fabric/firefly-go ) diff --git a/go.work.sum b/go.work.sum index f61260ef5f..b247562238 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,602 +1,295 @@ -bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512 h1:SRsZGA7aFnCZETmov57jwPrWuTmaZK6+4R4v5FUe1/c= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go/aiplatform v1.24.0 h1:QqHZT1IMldf/daXoSnkJWBIqGBsw50X+xP6HSVzLRPo= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/analytics v0.12.0 h1:NKw6PpQi6V1O+KsjuTd+bhip9d0REYu4NevC45vtGp8= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/area120 v0.6.0 h1:TCMhwWEWhCn8d44/Zs7UCICTWje9j3HuV6nVGMjdpYw= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/artifactregistry v1.7.0 h1:9yKYCozdh29v7QMx3QBuksZGtPNICFb5SVnyNvkKRGg= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/asset v1.8.0 h1:qzYOcI6u4CD+0R1E8rWbrqs04fISCcg2YYxW8yBAqFM= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/assuredworkloads v1.7.0 h1:IYhjgcgwb5TIAhC0aWQGGOqBnP0c2xijgMGf1iJRs50= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= 
-cloud.google.com/go/automl v1.6.0 h1:U+kHmeKGXgBvTlrecPJhwkItWaIpIscG5DUpQxBQZZg= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/bigquery v1.42.0 h1:JuTk8po4bCKRwObdT0zLb1K0BGkGHJdtgs2GK3j2Gws= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/billing v1.5.0 h1:4RESn+mA7eGPBr5eQ4B/hbkHNivzYHbgRWpdlNeNjiE= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/binaryauthorization v1.2.0 h1:5F7dowxGuYQlX3LjfjH/sKf+IvI1TsItTw0sDZmoec4= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/cloudtasks v1.6.0 h1:IL5W4fh6dAq9x1mO+4evrWCISOmPJegdaO0hZRZmWNE= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= 
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/containeranalysis v0.6.0 h1:2824iym832ljKdVpCBnpqm5K94YT/uHTVhNF+dRTXPI= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/datacatalog v1.6.0 h1:xzXGAE2fAuMh+ksODKr9nRv9ega1vHjFwRqMA8tRrVE= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/dataflow v0.7.0 h1:CW3541Fm7KPTyZjJdnX6NtaGXYFn5XbFC5UcjgALKvU= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataform v0.4.0 h1:fnwkyzCVcPI/TmBheGgpmK2h+hWUIDHcZBincHRhrQ0= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/datalabeling v0.6.0 h1:dp8jOF21n/7jwgo/uuA0RN8hvLcKO4q6s/yvwevs2ZM= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/dataqna v0.6.0 h1:gx9jr41ytcA3dXkbbd409euEaWtofCVXYBvJz3iYm18= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= -cloud.google.com/go/datastream v1.3.0 h1:ula4YR2K66o5wifLdPQMtR2I6KP+zvqdSEb6ncd1e0g= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/dialogflow v1.17.0 h1:NU0Pj57H++JQOW225/7o34sUZ4i9/TLfWFOSbI3N1cY= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/documentai v1.8.0 h1:CipwaecNhtsWUSneV2J5y8OqudHqvqPlcMHgSyh8vak= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/domains v0.7.0 h1:pu3JIgC1rswIqi5romW0JgNO6CTUydLYX8zyjiAvO1c= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/edgecontainer v0.2.0 h1:hd6J2n5dBBRuAqnNUEsKWrp6XNPKsaxwwIyzOPZTokk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/firestore v1.8.0 
h1:HokMB9Io0hAyYzlGFeFVMgE3iaPXNvaIsDx5JzblGLI= -cloud.google.com/go/functions v1.7.0 h1:s3Snbr2O4j4p7CuwImBas8rNNmkHS1YJANcCpKGqQSE= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/gaming v1.6.0 h1:PKggmegChZulPW8yvtziF8P9UOuVFwbvylbEucTNups= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gkeconnect v0.6.0 h1:zAcvDa04tTnGdu6TEZewaLN2tdMtUOJJ7fEceULjguA= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkehub v0.10.0 h1:JTcTaYQRGsVm+qkah7WzHb6e9sf1C0laYdRPn9aN+vg= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/grafeas v0.2.0 h1:CYjC+xzdPvbV65gi6Dr4YowKcmLo045pm18L0DhdELM= -cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/language v1.6.0 h1:Fb2iua/5/UBvUuW9PgBinwsCRDi1qoQJEuekOinHFCs= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/lifesciences v0.6.0 h1:tIqhivE2LMVYkX0BLgG7xL64oNpDaFFI7teunglt1tI= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/mediatranslation v0.6.0 h1:qAJzpxmEX+SeND10Y/4868L5wfZpo4Y3BIEnIieP4dk= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/memcache v1.5.0 
h1:qTBOiSnVw7rnW6GVeH5Br8qs80ILoflNgFZySvaT4ek= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/metastore v1.6.0 h1:wzJ9HslsybiJ3HL2168dVonr9D/eBq0VqObiMSCrE6c= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/networkconnectivity v1.5.0 h1:mtIQewrz1ewMU3J0vVkUIJtAkpOqgkz4+UmcreeAm08= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networksecurity v0.6.0 h1:qDEX/3sipg9dS5JYsAY+YvgTjPR63cozzAWop8oZS94= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/notebooks v1.3.0 h1:YfPI4pOYQDcqJ+thM2cGtR9oRoRv42vRfubSPZnk3DI= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/osconfig v1.8.0 h1:fkFlXCxkUt3tE8LYtF6CipuPbC/HIrciwDTjFpsTf88= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/oslogin v1.5.0 h1:/7sVaMdtqSm6AjxW8KzoM6UKawkg3REr0XJ1zKtidpc= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/phishingprotection v0.6.0 h1:OrwHLSRSZyaiOt3tnY33dsKSedxbMzsXvqB21okItNQ= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/privatecatalog v0.6.0 h1:Vz86uiHCtNGm1DeC32HeG2VXmOq5JRYA3VRPf8ZEcSg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= -cloud.google.com/go/recaptchaenterprise v1.3.1 h1:u6EznTGzIdsyOsvm+Xkw0aSuKFXQlyjGE9a4exk6iNQ= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0 h1:BkkI7C0o8CtaHvdDMr5IA+y8pk0Y5wb73C7DHQiAKnw= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recommendationengine v0.6.0 h1:6w+WxPf2LmUEqX0YyvfCoYb8aBYOcbIV25Vg6R0FLGw= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= 
cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommender v1.6.0 h1:C1tw+Qa/bgm6LoH1wuxYdoyinwKkW/jDJ0GpSJf58cE= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/redis v1.8.0 h1:gtPd4pG/Go5mrdGQ4MJXxPHtjxtoWUBkrWLXNV1L2TA= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/retail v1.9.0 h1:Q3W/JsQupZWaoFxUOugZd1Eq590R+Dk6dhacLK2h7+w= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/scheduler v1.5.0 h1:Fe1Upic/q4cwqXbInCzgAW35QSerj8JlNwATIxDdfOI= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/secretmanager v1.6.0 h1:5v0zegRMlytVnN7J+bg5Ipqah3I2RZ67ysy00mvA+lA= -cloud.google.com/go/security v1.8.0 h1:linnRc3/gJYDfKbAtNixVQ52+66DpOx5MmCz0NNxal8= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/securitycenter v1.14.0 h1:hKIggnv2eCAPjsVnFcZbytMOsFOk6p4ut0iAUDoNsNA= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/servicedirectory v1.5.0 h1:QmCWml/qvNOYyiPP4G52srYcsHSLCXuvydJDVLTFSe8= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/spanner v1.28.0 h1:1ZukQlok9wZyZUBFm++xpleudtviOPO8gvGAF2ydxWQ= -cloud.google.com/go/speech v1.7.0 h1:bRI2QczZGpcPfuhHr63VOdfyyfYp/43N0wRuBKrd0nQ= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0 
h1:wWRIaDURQA8xxHguFCshYepGlrWIrbBnAmc7wfg07qY= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/talent v1.2.0 h1:6c4pvu3k2idEhJRZnZ2HdVLWZUuT9fsns2gQtCzRtqA= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/videointelligence v1.7.0 h1:w56i2xl1jHX2tz6rHXBPHd6xujevhImzbc16Kl+V/zQ= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/vision v1.2.0 h1:/CsSTkbmO9HC8iQpxbK8ATms3OQaX3YQUeTMGCxlaK4= -cloud.google.com/go/vision/v2 v2.3.0 h1:eEyIDJ5/98UmQrYZ6eQExUT3iHyDjzzPX29UP6x7ZQo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/webrisk v1.5.0 h1:WdHJmLSAs5bIis/WWO7pIfiRBD1PiWe1OAlPrWeM9Tk= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/workflows v1.7.0 h1:0MjX5ugKmTdbRG2Vai5aAgNAOe2wzvs/XQwFDSowy9c= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= -gioui.org v0.0.0-20210308172011-57750fc8a0a6 h1:K72hopUosKG3ntOPNG4OzzbuhxGuVf06fa2la1/H/Ho= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8 h1:V8krnnfGj4pV65YLUm3C0/8bl7V5Nry2Pwvy3ru/wLc= -github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible h1:KnPIugL51v3N3WwvaSmZbxukD1WuWXOiE9fRdu32f2I= -github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= -github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= -github.com/ClickHouse/clickhouse-go v1.4.3 h1:iAFMa2UrQdR5bHJ2/yaSLffZkxpcOYQMCUuKeNXGdqc= -github.com/DATA-DOG/go-txdb v0.1.3 h1:R4v6OuOcy2O147e2zHxU0B4NDtF+INb5R9q/CV7AEMg= -github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3 h1:4FA+QBaydEHlwxg0lMN3rhwoDaQy6LKhVWR4qvq4BuA= -github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= 
-github.com/alexflint/go-filemutex v1.1.0 h1:IAWuUuRYL2hETx5b8vCgwnD+xSdlsTQY6s2JjBsqLdg= -github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= -github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30 h1:HGREIyk0QRPt70R69Gm1JFHDgoiyYpCyuGE8E9k/nf0= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= -github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/aws/aws-sdk-go v1.17.7 h1:/4+rDPe0W95KBmNGYCG+NUvdL8ssPYBMxL+aSCg6nIA= -github.com/aws/aws-sdk-go-v2 v1.9.2 h1:dUFQcMNZMLON4BOe273pl0filK9RqyQMhCK/6xssL6s= -github.com/aws/aws-sdk-go-v2/config v1.8.3 h1:o5583X4qUfuRrOGOgmOcDgvr5gJVSu57NK08cWAhIDk= -github.com/aws/aws-sdk-go-v2/credentials v1.4.3 h1:LTdD5QhK073MpElh9umLLP97wxphkgVC/OjQaEbBwZA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 h1:9tfxW/icbSu98C2pcNynm5jmDwU3/741F11688B6QnU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4 h1:TnU1cY51027j/MQeFy7DIgk1UuzJY+wLFYqXceY/fiE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 h1:leSJ6vCqtPpTmBIgE7044B1wql1E4n//McF+mEgNrYg= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 h1:r7jel2aa4d9Duys7wEmWqDd5ebpC9w6Kxu6wIjjp18E= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2 h1:RnZjLgtCGLsF2xYYksy0yrx6xPvKG9BYv29VfK4p/J8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1 h1:z+P3r4LrwdudLKBoEVWxIORrk4sVg4/iqpG3+CS53AY= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 h1:pZwkxZbspdqRGzddDB92bkZBoB7lg85sMRE7OqdB3V0= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 h1:ol2Y5DWqnJeKqNd8th7JWzBtqu63xpOfs1Is+n1t8/4= -github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= -github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= -github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= -github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= -github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= -github.com/boombuler/barcode v1.0.0 h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA= -github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= 
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/checkpoint-restore/go-criu/v4 v4.1.0 h1:WW2B2uxx9KWF6bGlHqhm8Okiafwwx7Y2kcpn8lCpjgo= -github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8= -github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= -github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= -github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= -github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 h1:KwaoQzs/WeUxxJqiJsZ4euOly1Az/IgZXXSxlD/UBNk= -github.com/cockroachdb/apd v1.1.0 
h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= -github.com/cockroachdb/cockroach-go/v2 v2.1.1 h1:3XzfSMuUT0wBe1a3o5C0eOTcArhmmFAg2Jzh/7hhKqo= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= -github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY= -github.com/containerd/btrfs v1.0.0 h1:osn1exbzdub9L5SouXO5swW4ea/xVdJZ3wokxN5GrnA= -github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= -github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= -github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA= -github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= -github.com/containerd/go-cni v1.1.3 h1:t0MQwrtM96SH71Md8tH0uKrVE9v+jxkDTbvFSm3B9VE= -github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0= -github.com/containerd/imgcrypt v1.1.3 h1:69UKRsA3Q/lAwo2eDzWshdjimqhmprrWXfNtBeO0fBc= -github.com/containerd/nri v0.1.0 h1:6QioHRlThlKh2RkRTR4kIT3PKAcrLo3gIWnjkM4dQmQ= -github.com/containerd/stargz-snapshotter/estargz v0.4.1 h1:5e7heayhB7CcgdTkqfZqrNaNv15gABwr3Q2jBTbLlt4= -github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= -github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= -github.com/containerd/zfs v1.0.0 h1:cXLJbx+4Jj7rNsTiqVfm6i+RNLx6FFA2fMmDlEf+Wm8= -github.com/containernetworking/cni v1.0.1 h1:9OIL/sZmMYDBe+G8svzILAlulUpaDTUjeAbtH/JNLBo= -github.com/containernetworking/plugins v1.0.1 h1:wwCfYbTCj5FC0EJgyzyjTXmqysOiJE9r712Z+2KVZAk= -github.com/containers/ocicrypt v1.1.2 h1:Ez+GAMP/4GLix5Ywo/fL7O0nY771gsBIigiqUm1aXz0= -github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= -github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= -github.com/coreos/go-etcd v2.0.0+incompatible h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo= -github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk= -github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/cucumber/godog v0.8.0 h1:sJ0MaOGfNeJWD+DiBjL4VTwrUJrFdiq5sF5b4wPgS+o= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= -github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369 h1:XNT/Zf5l++1Pyg08/HV04ppB0gKxAqtZQBRYiYrUuYk= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c h1:Xo2rK1pzOm0jO6abTPIQwbAmqBIOj132otexc1mmzFc= -github.com/d2g/dhcp4client v1.0.0 h1:suYBsYZIkSlUMEz4TAYCczKf62IA2UWC+O8+KtdOhCo= -github.com/d2g/dhcp4server 
v0.0.0-20181031114812-7d4a0a7f59a5 h1:+CpLbZIeUn94m02LdEKPcgErLJ347NUwxPKs5u8ieiY= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4 h1:itqmmf1PFpC4n5JW+j4BU7X4MTfVurhYRTjODoPb2Y8= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= -github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4= -github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= -github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558= -github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= -github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220520190051-1e77728a1eaa/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.6.2 h1:JiO+kJTpmYGjEodY7O1Zk8oZcNz1+f30UtwtXoFUPzE= -github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= -github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsouza/fake-gcs-server v1.17.0 h1:OeH75kBZcZa3ZE+zz/mFdJ2btt9FgqfjI7gIh9+5fvk= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU= -github.com/gabriel-vasile/mimetype v1.4.0 h1:Cn9dkdYsMIu56tGho+fqzh7XmvY2YyGU0FnbhiOsEro= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko= -github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= -github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ= -github.com/go-fonts/latin-modern v0.2.0 h1:5/Tv1Ek/QCr20C6ZOz15vw3g7GELYL98KWr8Hgo+3vk= -github.com/go-fonts/liberation v0.1.1 h1:wBrPaMkrXFBW3qXpXAjiKljdVUMxn9bX2ia3XjPHoik= -github.com/go-fonts/stix v0.1.0 h1:UlZlgrvvmT/58o573ot7NFw0vZasZ5I6bcIft/oMdgg= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= -github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07 h1:OTlfMvwR1rLyf9goVmXfuS5AJn80+Vmj4rTf4n46SOs= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd h1:hSkbZ9XSyjyBirMeqSqUrK+9HboWrweVlzRNqoBi2d4= -github.com/gobuffalo/depgen v0.1.0 h1:31atYa/UW9V5q8vMJ+W6wd64OaaTHUrCUXER358zLM4= -github.com/gobuffalo/flect v0.1.3 h1:3GQ53z7E3o00C/yy7Ko8VXqQXoJGLkrTQCLTF1EjoXU= -github.com/gobuffalo/genny v0.1.1 h1:iQ0D6SpNXIxu52WESsD+KoQ7af2e3nCfnSBoSF/hKe0= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211 h1:mSVZ4vj4khv+oThUfS+SQU3UuFIZ5Zo6UNcvK8E8Mz8= -github.com/gobuffalo/gogen v0.1.1 
h1:dLg+zb+uOyd/mKeQUYIbwbNmfRsr9hd/WtYWepmayhI= -github.com/gobuffalo/here v0.6.0 h1:hYrd0a6gDmWxBM4TnrGw8mQg24iSVoIkHEk7FodQcBI= -github.com/gobuffalo/logger v1.0.0 h1:xw9Ko9EcC5iAFprrjJ6oZco9UpzS5MQ4jAwghsLHdy4= -github.com/gobuffalo/mapi v1.0.2 h1:fq9WcL1BYrm36SzK6+aAnZ8hcp+SrmnDyAxhNx8dvJk= -github.com/gobuffalo/packr/v2 v2.5.1 h1:TFOeY2VoGamPjQLiNDT3mn//ytzk236VMO2j7iHxJR4= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754 h1:tpom+2CJmpzAWj5/VEHync2rJGi+epHNIeRSWjzGA+4= -github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556 h1:N/MD/sr6o61X+iZBAT2qEUF023s4KbA8RWfKzl0L6MQ= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= -github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= -github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= -github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= -github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.6.13/go.mod h1:qEySVqXrEugbHKvmhI8ZqtQi75/RHSSRNpffvB4I6Bw= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.5.1 h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ= -github.com/google/go-github/v39 v39.2.0 h1:rNNM311XtPOz5rDdsJXAp2o8F67X9FnROXTvto3aSnQ= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian/v3 v3.2.1 
h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= -github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= -github.com/hashicorp/consul/api v1.15.3 
h1:WYONYL2rxTXtlekAqblR2SCdJsizMDIj/uXb5wNy9zU= -github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= -github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go.net v0.0.1 h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= -github.com/hashicorp/mdns v1.0.0 h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs= -github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= -github.com/hashicorp/serf v0.9.8 h1:JGklO/2Drf1QGa312EieQN3zhxQ+aJg6pG+aC3MFaVo= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hyperledger/firefly-common v1.2.8 h1:tPHgjGzQnPRXl77qHQd+dieu6N62NFEWhoGW7sn9u6U= -github.com/hyperledger/firefly-common v1.2.8/go.mod h1:q6uawjzWAFekIMFb0t9EWpJQQvjvl2CFBikk6++8Woc= -github.com/hyperledger/firefly-common v1.2.10 h1:im+GPaTnv764WmNvmtFFXz7+wx2IXg58Pf8AtDapM5o= -github.com/hyperledger/firefly-common v1.2.10/go.mod h1:kSAawo5is7RKWh3e9PMdWuR/OitfHAaAWVSK6V4lX80= -github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/intel/goresctrl v0.2.0 h1:JyZjdMQu9Kl/wLXe9xA6s1X+tF6BWsQPFGJMEeCfWzE= -github.com/j-keck/arping v1.0.2 h1:hlLhuXgQkzIJTZuhMigvG/CuSkaspeaD9hRDk2zuiMI= -github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/pgconn v1.8.0 h1:FmjZ0rOyXTr1wfWs45i4a9vjnjWUAGpMuQLD9OSs+lw= -github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 h1:WAvSpGf7MsFuzAtK4Vk7R4EVe+liW4x83r4oWu0WHKw= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= -github.com/jackc/pgproto3/v2 v2.0.7 h1:6Pwi1b3QdY65cuv6SyVO0FgPd5J3Bl7wf/nQQjinHMA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= -github.com/jackc/pgtype v1.6.2 h1:b3pDeuhbbzBYcg5kwNmNDun4pFUD/0AAr1kLXZLeNt8= -github.com/jackc/pgx/v4 v4.10.1 h1:/6Q3ye4myIj6AaplUm+eRcz4OhK9HAvFf4ePsG40LJY= -github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94= -github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= -github.com/jinzhu/now v1.1.1 
h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmoiron/sqlx v1.3.1 h1:aLN7YINNZ7cYOPK3QC83dbM6KT0NMqVMw961TqrejlE= -github.com/joefitzgerald/rainbow-reporter v0.1.0 h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo= -github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= -github.com/k0kubun/pp v2.3.0+incompatible h1:EKhKbi34VQDWJtq+zpsKSEhkHHs9w2P8Izbq8IhLVSo= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= -github.com/karrick/godirwalk v1.10.12 h1:BqUm+LuJcXjGv1d2mj3gBiQyrQ57a0rYoAmhvJQ7RDU= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= -github.com/ktrysmt/go-bitbucket v0.6.4 h1:C8dUGp0qkwncKtAnozHCbbqhptefzEd1I0sfnuy9rYQ= -github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio= -github.com/lyft/protoc-gen-star v0.5.3 h1:zSGLzsUew8RT+ZKPHc3jnf8XLaVyHzTcAFBzHtCNR20= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2 h1:JgVTCPf0uBVcUSWpyXmGpgOc62nK5HWUBKAGc3Qqa5k= -github.com/markbates/pkger v0.15.1 h1:3MPelV53RnGSW07izx5xGxl4e/sdRD6zqseIk0rMASY= -github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= -github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI= -github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= -github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o= -github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE= -github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= -github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible 
h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= -github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/gox v0.4.0 h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc= -github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ= -github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= -github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY= -github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= -github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/mutecomm/go-sqlcipher/v4 v4.4.0 h1:sV1tWCWGAVlPhNGT95Q+z/txFxuhAYWwHD1afF5bMZg= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= -github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8 h1:P48LjvUQpTReR3TQRbxSeSBsMXzfK0uol7eRcr7VBYQ= -github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ= -github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba h1:fhFP5RliM2HW/8XdcO5QngSfFli9GcRIpMXvypTQt6E= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5 h1:58+kh9C6jJVXYjt8IE48G2eWl6BjwU5Gj0gqY84fy78= -github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39 h1:H7DMc6FAjgwZZi8BRqjrAAHWoqEr5e5L6pS4V0ezet4= -github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= -github.com/phpdave11/gofpdf v1.4.2 h1:KPKiIbfwbvC/wOncwhrpRdXVj2CZTCFlw4wnoyjtHfQ= -github.com/phpdave11/gofpdi v1.0.12 h1:RZb9NG62cw/RW0rHAduVRo+98R8o/G1krcg2ns7DakQ= 
-github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= -github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs= -github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021 h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM= -github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= -github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= +github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= -github.com/rs/zerolog v1.15.0 h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 h1:nlG4Wa5+minh3S9LVFtNoY+GVRiudA2e3EVfcCi3RCA= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M= -github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 h1:ZFfeKAhIQiiOrQaI3/znw0gOmYpO28Tcu1YaqMa/jtQ= -github.com/sagikazarmark/crypt v0.8.0 h1:xtk0uUHVWVsRBdEUGYBym4CXbcllXky2M7Qlwsf8C0Y= -github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= -github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGuOeFkFbUWfBgK4= -github.com/sclevine/spec v1.2.0 h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= -github.com/shopspring/decimal v1.2.0 
h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/snowflakedb/gosnowflake v1.6.3 h1:EJDdDi74YbYt1ty164ge3fMZ0eVZ6KA7b1zmAa/wnRo= -github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= -github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= -github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= -github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648= -github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= -github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= -github.com/xanzy/go-gitlab v0.15.0 h1:rWtwKTgEnXyNUGrOArN7yyc3THRkpYcKXIXia9abywQ= -github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= -github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= -github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= -github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= -github.com/zenazn/goji v0.9.0 h1:RSQQAbXGArQ0dIDEq+PI6WqN6if+5KHu6x2Cx/GXLTQ= -gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b h1:7gd+rd8P3bqcn/96gOZa3F5dpJr/vEiDQYlNb/y2uNs= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= -go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= -go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8= -go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= -go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI= -go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk= -go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw= -go.etcd.io/etcd/server/v3 v3.5.0 h1:jk8D/lwGEDlQU9kZXUFMSANkE22Sg5+mW27ip8xcF9E= -go.mongodb.org/mongo-driver v1.7.0 h1:hHrvOBWlWB2c7+8Gh/Xi5jj82AgidK/t7KVXBZ+IyUA= -go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0 h1:Ky1MObd188aGbgb5OgNnwGuEEwI9MVIcc7rBW6zk5Ak= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 h1:Q3C9yzW6I9jqEc8sawxzxZmY48fs9u220KXq6d5s3XU= -go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y= -go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 h1:R/OBkMoGgfy2fLhs2QhkCI1w4HLEQX92GCcJB6SSdNk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdme7xSp2pIxThWopw8+QP51Yk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 h1:VQbUHoJqytHHSJ1OZodPH9tvZZSVzUHjPHpkO85sT6k= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 h1:Ydage/P0fRrSPpZeCVxzjqGcI6iVmG2xb43+IR8cjqM= -go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= -go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= -go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g= -go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8= -go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= -go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee 
h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= -golang.org/x/image v0.0.0-20210216034530-4410531fe030 h1:lP9pYkih3DUSC641giIXa2XqfTIbbbRr0w2EOTA7wHA= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220907135653-1e95f45603a7/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= 
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -606,28 +299,40 @@ golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/sys v0.0.0-20220908150016-7ac13a9a928d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/text v0.13.0/go.mod 
h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.9.3 h1:DnoIG+QAMaF5NvxnGe/oKsgKcAc6PcUyl8q0VetfQ8s= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= -gonum.org/v1/plot v0.9.0 h1:3sEo36Uopv1/SA/dMFFaxXoL5XyikJ9Sf2Vll/k6+2E= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= @@ -637,15 +342,36 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= +google.golang.org/api v0.97.0/go.mod 
h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= @@ -654,6 +380,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -664,71 +391,37 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc 
v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/cheggaaa/pb.v1 v1.0.25 h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I= -gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gorm.io/driver/postgres v1.0.8 h1:PAgM+PaHOSAeroTjHkCHCBIHHoBIf9RgPWGo8dF2DA8= -gorm.io/gorm v1.21.4 h1:J0xfPJMRfHgpVcYLrEAIqY/apdvTIkrltPQNHQLq9Qc= -honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= -k8s.io/api v0.22.5 h1:xk7C+rMjF/EGELiD560jdmwzrB788mfcHiNbMQLIVI8= -k8s.io/apimachinery v0.22.5 h1:cIPwldOYm1Slq9VLBRPtEYpyhjIm1C6aAMAoENuvN9s= -k8s.io/apiserver v0.22.5 h1:71krQxCUz218ecb+nPhfDsNB6QgP1/4EMvi1a2uYBlg= -k8s.io/client-go v0.22.5 h1:I8Zn/UqIdi2r02aZmhaJ1hqMxcpfJ3t5VqvHtctHYFo= -k8s.io/code-generator v0.19.7 h1:kM/68Y26Z/u//TFc1ggVVcg62te8A2yQh57jBfD0FWQ= -k8s.io/component-base v0.22.5 h1:U0eHqZm7mAFE42hFwYhY6ze/MmVaW00JpMrzVsQmzYE= -k8s.io/cri-api v0.23.1 h1:0DHL/hpTf4Fp+QkUXFefWcp1fhjXr9OlNdY9X99c+O8= -k8s.io/gengo 
v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE= -k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= -k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80= -k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= -modernc.org/b v1.0.0 h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o= -modernc.org/cc/v3 v3.32.4 h1:1ScT6MCQRWwvwVdERhGPsPq0f55J1/pFEOCiqM7zc78= -modernc.org/ccgo/v3 v3.9.2 h1:mOLFgduk60HFuPmxSix3AluTEh7zhozkby+e1VDo/ro= -modernc.org/db v1.0.0 h1:2c6NdCfaLnshSvY7OU09cyAY0gYXUZj4lmg5ItHyucg= -modernc.org/file v1.0.0 h1:9/PdvjVxd5+LcWUQIfapAWRGOkDLK90rloa8s/au06A= -modernc.org/fileutil v1.0.0 h1:Z1AFLZwl6BO8A5NldQg/xTSjGLetp+1Ubvl4alfGx8w= -modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE= -modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= -modernc.org/internal v1.0.0 h1:XMDsFDcBDsibbBnHB2xzljZ+B1yrOVLEFkKL2u15Glw= -modernc.org/libc v1.9.5 h1:zv111ldxmP7DJ5mOIqzRbza7ZDl3kh4ncKfASB2jIYY= -modernc.org/lldb v1.0.0 h1:6vjDJxQEfhlOLwl4bhpwIz00uyFK4EmSYcbwqwbynsc= -modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY= -modernc.org/memory v1.0.4 h1:utMBrFcpnQDdNsmM6asmyH/FM9TqLPS7XF7otpJmrwM= -modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= -modernc.org/ql v1.0.0 h1:bIQ/trWNVjQPlinI6jdOQsi195SIturGo3mp5hsDqVU= -modernc.org/sortutil v1.1.0 h1:oP3U4uM+NT/qBQcbg/K2iqAX0Nx7B1b6YZtq3Gk/PjM= -modernc.org/sqlite v1.10.6 h1:iNDTQbULcm0IJAqrzCm2JcCqxaKRS94rJ5/clBMRmc8= -modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= -modernc.org/tcl v1.5.2 h1:sYNjGr4zK6cDH74USl8wVJRrvDX6UOLpG0j4lFvR0W0= -modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= -modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc= -modernc.org/zappy v1.0.0 h1:dPVaP+3ueIUv4guk8PuZ2wiUGcJ1WUVvIheeSSTD0yk= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= -rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= -rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= -rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/internal/apiserver/ffi2swagger.go b/internal/apiserver/ffi2swagger.go index 83f1d8805b..c6f63d1c23 100644 --- a/internal/apiserver/ffi2swagger.go +++ b/internal/apiserver/ffi2swagger.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -35,7 +35,7 @@ import ( ) type FFISwaggerGen interface { - Generate(ctx context.Context, baseURL string, api *core.ContractAPI, ffi *fftypes.FFI) *openapi3.T + Build(ctx context.Context, api *core.ContractAPI, ffi *fftypes.FFI) (*ffapi.SwaggerGenOptions, []*ffapi.Route) } type ContractListenerInput struct { @@ -49,15 +49,9 @@ type ContractListenerInputWithLocation struct { Location *fftypes.JSONAny `ffstruct:"ContractListener" json:"location,omitempty"` } -// ffiSwaggerGen generates OpenAPI3 (Swagger) definitions for FFIs -type ffiSwaggerGen struct { -} - -func NewFFISwaggerGen() FFISwaggerGen { - return &ffiSwaggerGen{} -} +type ffiSwaggerGen struct{} -func (og *ffiSwaggerGen) Generate(ctx context.Context, baseURL string, api *core.ContractAPI, ffi *fftypes.FFI) (swagger *openapi3.T) { +func (swg *ffiSwaggerGen) Build(ctx context.Context, api *core.ContractAPI, ffi *fftypes.FFI) (*ffapi.SwaggerGenOptions, []*ffapi.Route) { hasLocation := !api.Location.IsNil() routes := []*ffapi.Route{ @@ -71,22 +65,21 @@ func (og *ffiSwaggerGen) Generate(ctx context.Context, baseURL string, api *core }, } for _, method := range ffi.Methods { - routes = og.addMethod(ctx, routes, method, hasLocation) + routes = addFFIMethod(ctx, routes, method, hasLocation) } for _, event := range ffi.Events { - routes = og.addEvent(routes, event, hasLocation) + routes = addFFIEvent(ctx, routes, event, hasLocation) } - return ffapi.NewSwaggerGen(&ffapi.Options{ + return &ffapi.SwaggerGenOptions{ Title: ffi.Name, Version: ffi.Version, Description: ffi.Description, - BaseURL: baseURL, DefaultRequestTimeout: config.GetDuration(coreconfig.APIRequestTimeout), - }).Generate(ctx, routes) + }, routes } -func (og *ffiSwaggerGen) addMethod(ctx context.Context, routes []*ffapi.Route, method *fftypes.FFIMethod, hasLocation bool) []*ffapi.Route { +func addFFIMethod(ctx context.Context, routes []*ffapi.Route, method *fftypes.FFIMethod, hasLocation bool) []*ffapi.Route { description := method.Description if method.Details != nil && len(method.Details) > 0 { additionalDetailsHeader := i18n.Expand(ctx, coremsgs.APISmartContractDetails) @@ -97,11 +90,9 @@ func (og *ffiSwaggerGen) addMethod(ctx context.Context, routes []*ffapi.Route, m Path: fmt.Sprintf("invoke/%s", method.Pathname), // must match a route defined in apiserver routes! Method: http.MethodPost, JSONInputSchema: func(ctx context.Context, schemaGen ffapi.SchemaGenerator) (*openapi3.SchemaRef, error) { - return contractJSONSchema(ctx, &method.Params, hasLocation) - }, - JSONOutputSchema: func(ctx context.Context, schemaGen ffapi.SchemaGenerator) (*openapi3.SchemaRef, error) { - return contractJSONSchema(ctx, &method.Returns, true) + return contractRequestJSONSchema(ctx, &method.Params, hasLocation) }, + JSONOutputValue: func() interface{} { return &core.OperationWithDetail{} }, JSONOutputCodes: []int{http.StatusOK}, PreTranslatedDescription: description, }) @@ -110,10 +101,10 @@ func (og *ffiSwaggerGen) addMethod(ctx context.Context, routes []*ffapi.Route, m Path: fmt.Sprintf("query/%s", method.Pathname), // must match a route defined in apiserver routes! 
Method: http.MethodPost, JSONInputSchema: func(ctx context.Context, schemaGen ffapi.SchemaGenerator) (*openapi3.SchemaRef, error) { - return contractJSONSchema(ctx, &method.Params, hasLocation) + return contractRequestJSONSchema(ctx, &method.Params, hasLocation) }, JSONOutputSchema: func(ctx context.Context, schemaGen ffapi.SchemaGenerator) (*openapi3.SchemaRef, error) { - return contractJSONSchema(ctx, &method.Returns, true) + return contractQueryResponseJSONSchema(ctx, &method.Returns) }, JSONOutputCodes: []int{http.StatusOK}, PreTranslatedDescription: description, @@ -121,8 +112,7 @@ func (og *ffiSwaggerGen) addMethod(ctx context.Context, routes []*ffapi.Route, m return routes } -func (og *ffiSwaggerGen) addEvent(routes []*ffapi.Route, event *fftypes.FFIEvent, hasLocation bool) []*ffapi.Route { - ctx := context.Background() +func addFFIEvent(ctx context.Context, routes []*ffapi.Route, event *fftypes.FFIEvent, hasLocation bool) []*ffapi.Route { description := event.Description if event.Details != nil && len(event.Details) > 0 { additionalDetailsHeader := i18n.Expand(ctx, coremsgs.APISmartContractDetails) @@ -154,10 +144,10 @@ func (og *ffiSwaggerGen) addEvent(routes []*ffapi.Route, event *fftypes.FFIEvent } /** - * Parse the FFI and build a corresponding JSON Schema to describe the request body for "invoke". - * Returns the JSON Schema as an `fftypes.JSONObject`. + * Parse the FFI and build a corresponding JSON Schema to describe the request body for "invoke" or "query" requests + * Returns the JSON Schema as an `fftypes.JSONObject` */ -func contractJSONSchema(ctx context.Context, params *fftypes.FFIParams, hasLocation bool) (*openapi3.SchemaRef, error) { +func contractRequestJSONSchema(ctx context.Context, params *fftypes.FFIParams, hasLocation bool) (*openapi3.SchemaRef, error) { paramSchema := make(fftypes.JSONObject, len(*params)) for _, param := range *params { paramSchema[param.Name] = param.Schema @@ -201,6 +191,40 @@ func contractJSONSchema(ctx context.Context, params *fftypes.FFIParams, hasLocat return openapi3.NewSchemaRef("", s), nil } +/** + * Parse the FFI and build a corresponding JSON Schema to describe the response body for "query" requests + * Returns the JSON Schema as an `fftypes.JSONObject` + */ +func contractQueryResponseJSONSchema(ctx context.Context, params *fftypes.FFIParams) (*openapi3.SchemaRef, error) { + paramSchema := make(fftypes.JSONObject, len(*params)) + for i, param := range *params { + paramName := param.Name + if paramName == "" { + if i > 0 { + paramName = fmt.Sprintf("output%v", i) + } else { + paramName = "output" + } + } + paramSchema[paramName] = param.Schema + } + outputSchema := fftypes.JSONObject{ + "type": "object", + "description": i18n.Expand(ctx, coremsgs.ContractCallRequestOutput), + "properties": paramSchema, + } + b, err := json.Marshal(outputSchema) + if err != nil { + return nil, err + } + s := openapi3.NewSchema() + err = s.UnmarshalJSON(b) + if err != nil { + return nil, err + } + return openapi3.NewSchemaRef("", s), nil +} + func buildDetailsTable(ctx context.Context, details map[string]interface{}) string { keyHeader := i18n.Expand(ctx, coremsgs.APISmartContractDetailsKey) valueHeader := i18n.Expand(ctx, coremsgs.APISmartContractDetailsKey) diff --git a/internal/apiserver/ffi2swagger_test.go b/internal/apiserver/ffi2swagger_test.go index 9f86245248..4551c1c824 100644 --- a/internal/apiserver/ffi2swagger_test.go +++ b/internal/apiserver/ffi2swagger_test.go @@ -23,6 +23,7 @@ import ( "github.com/getkin/kin-openapi/openapi3" 
"github.com/ghodss/yaml" + "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly/pkg/core" "github.com/stretchr/testify/assert" @@ -114,9 +115,10 @@ func paramNames(p openapi3.Schemas) []string { } func TestGenerate(t *testing.T) { - g := NewFFISwaggerGen() api := &core.ContractAPI{} - doc := g.Generate(context.Background(), "http://localhost:12345", api, testFFI()) + options, routes := (&ffiSwaggerGen{}).Build(context.Background(), api, testFFI()) + options.BaseURL = "http://localhost:12345" + doc := ffapi.NewSwaggerGen(options).Generate(context.Background(), routes) b, err := yaml.Marshal(doc) assert.NoError(t, err) @@ -150,9 +152,10 @@ func TestGenerate(t *testing.T) { } func TestGenerateWithLocation(t *testing.T) { - g := NewFFISwaggerGen() api := &core.ContractAPI{Location: fftypes.JSONAnyPtr(`{}`)} - doc := g.Generate(context.Background(), "http://localhost:12345", api, testFFI()) + options, routes := (&ffiSwaggerGen{}).Build(context.Background(), api, testFFI()) + options.BaseURL = "http://localhost:12345" + doc := ffapi.NewSwaggerGen(options).Generate(context.Background(), routes) b, err := yaml.Marshal(doc) assert.NoError(t, err) @@ -193,7 +196,7 @@ func TestFFIParamBadSchema(t *testing.T) { Schema: fftypes.JSONAnyPtr(`{`), }, } - _, err := contractJSONSchema(ctx, params, true) + _, err := contractRequestJSONSchema(ctx, params, true) assert.Error(t, err) params = &fftypes.FFIParams{ @@ -202,6 +205,56 @@ func TestFFIParamBadSchema(t *testing.T) { Schema: fftypes.JSONAnyPtr(`{"type": false}`), }, } - _, err = contractJSONSchema(ctx, params, true) + _, err = contractRequestJSONSchema(ctx, params, true) + assert.Error(t, err) +} + +func TestUnnamedOutputs(t *testing.T) { + ctx := context.Background() + params := &fftypes.FFIParams{ + { + Name: "", + Schema: fftypes.JSONAnyPtr(`{}`), + }, + { + Name: "", + Schema: fftypes.JSONAnyPtr(`{}`), + }, + } + + expectedJSON := `{ + "description": "A map of named outputs", + "properties": { + "output": {}, + "output1": {} + }, + "type": "object" + }` + + ref, err := contractQueryResponseJSONSchema(ctx, params) + assert.NoError(t, err) + b, err := ref.MarshalJSON() + assert.JSONEq(t, expectedJSON, string(b)) +} + +func TestBadSchema(t *testing.T) { + ctx := context.Background() + params := &fftypes.FFIParams{ + { + Name: "", + Schema: fftypes.JSONAnyPtr(`{`), + }, + } + _, err := contractQueryResponseJSONSchema(ctx, params) + assert.Error(t, err) + + ctx = context.Background() + params = &fftypes.FFIParams{ + { + Name: "", + Schema: fftypes.JSONAnyPtr(`{"type": false}`), + }, + } + _, err = contractQueryResponseJSONSchema(ctx, params) assert.Error(t, err) } diff --git a/internal/apiserver/route_delete_contract_api.go b/internal/apiserver/route_delete_contract_api.go new file mode 100644 index 0000000000..5d763c7239 --- /dev/null +++ b/internal/apiserver/route_delete_contract_api.go @@ -0,0 +1,43 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package apiserver + +import ( + "net/http" + + "github.com/hyperledger/firefly-common/pkg/ffapi" + "github.com/hyperledger/firefly/internal/coremsgs" +) + +var deleteContractAPI = &ffapi.Route{ + Name: "deleteContractAPI", + Path: "apis/{apiName}", + Method: http.MethodDelete, + PathParams: []*ffapi.PathParam{ + {Name: "apiName", Description: coremsgs.APIParamsContractAPIName}, + }, + QueryParams: nil, + Description: coremsgs.APIEndpointsDeleteContractAPI, + JSONInputValue: nil, + JSONOutputValue: nil, + JSONOutputCodes: []int{http.StatusNoContent}, // Sync operation, no output + Extensions: &coreExtensions{ + CoreJSONHandler: func(r *ffapi.APIRequest, cr *coreRequest) (output interface{}, err error) { + return nil, cr.or.Contracts().DeleteContractAPI(cr.ctx, r.PP["apiName"]) + }, + }, +} diff --git a/internal/apiserver/route_delete_contract_api_test.go b/internal/apiserver/route_delete_contract_api_test.go new file mode 100644 index 0000000000..ec594686f7 --- /dev/null +++ b/internal/apiserver/route_delete_contract_api_test.go @@ -0,0 +1,41 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apiserver + +import ( + "net/http/httptest" + "testing" + + "github.com/hyperledger/firefly/mocks/contractmocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestDeleteContractAPI(t *testing.T) { + o, r := newTestAPIServer() + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + mcm := &contractmocks.Manager{} + o.On("Contracts").Return(mcm) + req := httptest.NewRequest("DELETE", "/api/v1/namespaces/ns1/apis/banana", nil) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + mcm.On("DeleteContractAPI", mock.Anything, "banana").Return(nil) + r.ServeHTTP(res, req) + + assert.Equal(t, 204, res.Result().StatusCode) +} diff --git a/internal/apiserver/route_delete_contract_interface.go b/internal/apiserver/route_delete_contract_interface.go new file mode 100644 index 0000000000..64631f426b --- /dev/null +++ b/internal/apiserver/route_delete_contract_interface.go @@ -0,0 +1,48 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package apiserver + +import ( + "net/http" + + "github.com/hyperledger/firefly-common/pkg/ffapi" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/internal/coremsgs" +) + +var deleteContractInterface = &ffapi.Route{ + Name: "deleteContractInterface", + Path: "contracts/interfaces/{interfaceId}", + Method: http.MethodDelete, + PathParams: []*ffapi.PathParam{ + {Name: "interfaceId", Description: coremsgs.APIParamsContractInterfaceID}, + }, + QueryParams: nil, + Description: coremsgs.APIEndpointsDeleteContractInterface, + JSONInputValue: nil, + JSONOutputValue: nil, + JSONOutputCodes: []int{http.StatusNoContent}, // Sync operation, no output + Extensions: &coreExtensions{ + CoreJSONHandler: func(r *ffapi.APIRequest, cr *coreRequest) (output interface{}, err error) { + interfaceID, err := fftypes.ParseUUID(cr.ctx, r.PP["interfaceId"]) + if err != nil { + return nil, err + } + return nil, cr.or.Contracts().DeleteFFI(cr.ctx, interfaceID) + }, + }, +} diff --git a/internal/apiserver/route_delete_contract_interface_test.go b/internal/apiserver/route_delete_contract_interface_test.go new file mode 100644 index 0000000000..a4daf087b1 --- /dev/null +++ b/internal/apiserver/route_delete_contract_interface_test.go @@ -0,0 +1,56 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apiserver + +import ( + "fmt" + "net/http/httptest" + "testing" + + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/mocks/contractmocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestDeleteContractInterface(t *testing.T) { + o, r := newTestAPIServer() + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + mcm := &contractmocks.Manager{} + o.On("Contracts").Return(mcm) + u := fftypes.NewUUID() + req := httptest.NewRequest("DELETE", fmt.Sprintf("/api/v1/namespaces/ns1/contracts/interfaces/%s", u), nil) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + mcm.On("DeleteFFI", mock.Anything, u).Return(nil) + r.ServeHTTP(res, req) + + assert.Equal(t, 204, res.Result().StatusCode) +} + +func TestDeleteContractInterfaceBadID(t *testing.T) { + o, r := newTestAPIServer() + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + req := httptest.NewRequest("DELETE", fmt.Sprintf("/api/v1/namespaces/ns1/contracts/interfaces/bad"), nil) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + r.ServeHTTP(res, req) + + assert.Equal(t, 400, res.Result().StatusCode) +} diff --git a/internal/apiserver/route_delete_token_pool.go b/internal/apiserver/route_delete_token_pool.go new file mode 100644 index 0000000000..b69c6d3ac0 --- /dev/null +++ b/internal/apiserver/route_delete_token_pool.go @@ -0,0 +1,43 @@ +// Copyright © 2023 Kaleido, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apiserver + +import ( + "net/http" + + "github.com/hyperledger/firefly-common/pkg/ffapi" + "github.com/hyperledger/firefly/internal/coremsgs" +) + +var deleteTokenPool = &ffapi.Route{ + Name: "deleteTokenPool", + Path: "tokens/pools/{nameOrId}", + Method: http.MethodDelete, + PathParams: []*ffapi.PathParam{ + {Name: "nameOrId", Description: coremsgs.APIParamsTokenPoolNameOrID}, + }, + QueryParams: nil, + Description: coremsgs.APIEndpointsDeleteTokenPool, + JSONInputValue: nil, + JSONOutputValue: nil, + JSONOutputCodes: []int{http.StatusNoContent}, // Sync operation, no output + Extensions: &coreExtensions{ + CoreJSONHandler: func(r *ffapi.APIRequest, cr *coreRequest) (output interface{}, err error) { + return nil, cr.or.Assets().DeleteTokenPool(cr.ctx, r.PP["nameOrId"]) + }, + }, +} diff --git a/internal/apiserver/route_delete_token_pool_test.go b/internal/apiserver/route_delete_token_pool_test.go new file mode 100644 index 0000000000..b238b409bc --- /dev/null +++ b/internal/apiserver/route_delete_token_pool_test.go @@ -0,0 +1,44 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package apiserver + +import ( + "fmt" + "net/http/httptest" + "testing" + + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/mocks/assetmocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestDeleteTokenPool(t *testing.T) { + o, r := newTestAPIServer() + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + mam := &assetmocks.Manager{} + o.On("Assets").Return(mam) + u := fftypes.NewUUID() + req := httptest.NewRequest("DELETE", fmt.Sprintf("/api/v1/namespaces/ns1/tokens/pools/%s", u), nil) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + mam.On("DeleteTokenPool", mock.Anything, u.String()).Return(nil) + r.ServeHTTP(res, req) + + assert.Equal(t, 204, res.Result().StatusCode) +} diff --git a/internal/apiserver/route_get_contract_api_by_name_test.go b/internal/apiserver/route_get_contract_api_by_name_test.go index 4c9cd6c72a..a31de8e458 100644 --- a/internal/apiserver/route_get_contract_api_by_name_test.go +++ b/internal/apiserver/route_get_contract_api_by_name_test.go @@ -40,7 +40,7 @@ func TestGetContractAPIByName(t *testing.T) { req.Header.Set("Content-Type", "application/json; charset=utf-8") res := httptest.NewRecorder() - mcm.On("GetContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1", "banana"). + mcm.On("GetContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1/namespaces/ns1", "banana"). Return(&core.ContractAPI{}, nil) r.ServeHTTP(res, req) diff --git a/internal/apiserver/route_get_contract_apis_test.go b/internal/apiserver/route_get_contract_apis_test.go index c9105c3720..479df4e6f8 100644 --- a/internal/apiserver/route_get_contract_apis_test.go +++ b/internal/apiserver/route_get_contract_apis_test.go @@ -40,7 +40,7 @@ func TestGetContractAPIs(t *testing.T) { req.Header.Set("Content-Type", "application/json; charset=utf-8") res := httptest.NewRecorder() - mcm.On("GetContractAPIs", mock.Anything, "http://127.0.0.1:5000/api/v1", mock.Anything). + mcm.On("GetContractAPIs", mock.Anything, "http://127.0.0.1:5000/api/v1/namespaces/ns1", mock.Anything). Return([]*core.ContractAPI{}, nil, nil) r.ServeHTTP(res, req) diff --git a/internal/apiserver/route_get_contract_interface_name_version.go b/internal/apiserver/route_get_contract_interface_name_version.go index 4c2d5d90f7..c995da4ca5 100644 --- a/internal/apiserver/route_get_contract_interface_name_version.go +++ b/internal/apiserver/route_get_contract_interface_name_version.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // diff --git a/internal/apiserver/route_get_events_test.go b/internal/apiserver/route_get_events_test.go index be6a567b1d..d5ccc36ef4 100644 --- a/internal/apiserver/route_get_events_test.go +++ b/internal/apiserver/route_get_events_test.go @@ -61,7 +61,7 @@ func TestGetEventsWithReferences(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, resWithCount.Items) assert.Equal(t, int64(0), resWithCount.Count) - assert.Equal(t, int64(10), resWithCount.Total) + assert.Equal(t, int64(10), *resWithCount.Total) } func TestGetEventsWithFetchReference(t *testing.T) { @@ -84,5 +84,5 @@ func TestGetEventsWithFetchReference(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, resWithCount.Items) assert.Equal(t, int64(0), resWithCount.Count) - assert.Equal(t, int64(10), resWithCount.Total) + assert.Equal(t, int64(10), *resWithCount.Total) } diff --git a/internal/apiserver/route_get_msgs_test.go b/internal/apiserver/route_get_msgs_test.go index 75b6648e3f..e055c3e360 100644 --- a/internal/apiserver/route_get_msgs_test.go +++ b/internal/apiserver/route_get_msgs_test.go @@ -61,7 +61,7 @@ func TestGetMessagesWithCount(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, resWithCount.Items) assert.Equal(t, int64(0), resWithCount.Count) - assert.Equal(t, int64(10), resWithCount.Total) + assert.Equal(t, int64(10), *resWithCount.Total) } func TestGetMessagesWithCountAndData(t *testing.T) { @@ -84,5 +84,5 @@ func TestGetMessagesWithCountAndData(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, resWithCount.Items) assert.Equal(t, int64(0), resWithCount.Count) - assert.Equal(t, int64(10), resWithCount.Total) + assert.Equal(t, int64(10), *resWithCount.Total) } diff --git a/internal/apiserver/route_get_op_by_id.go b/internal/apiserver/route_get_op_by_id.go index 4c61d79305..46a6408afd 100644 --- a/internal/apiserver/route_get_op_by_id.go +++ b/internal/apiserver/route_get_op_by_id.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -37,7 +37,7 @@ var getOpByID = &ffapi.Route{ }, Description: coremsgs.APIEndpointsGetOpByID, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return &core.Operation{} }, + JSONOutputValue: func() interface{} { return &core.OperationWithDetail{} }, JSONOutputCodes: []int{http.StatusOK}, Extensions: &coreExtensions{ CoreJSONHandler: func(r *ffapi.APIRequest, cr *coreRequest) (output interface{}, err error) { diff --git a/internal/apiserver/route_post_contract_api_publish.go b/internal/apiserver/route_post_contract_api_publish.go new file mode 100644 index 0000000000..068c707169 --- /dev/null +++ b/internal/apiserver/route_post_contract_api_publish.go @@ -0,0 +1,51 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package apiserver
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/hyperledger/firefly-common/pkg/ffapi"
+	"github.com/hyperledger/firefly/internal/coremsgs"
+	"github.com/hyperledger/firefly/pkg/core"
+)
+
+var postContractAPIPublish = &ffapi.Route{
+	Name:   "postContractAPIPublish",
+	Path:   "apis/{apiName}/publish",
+	Method: http.MethodPost,
+	PathParams: []*ffapi.PathParam{
+		{Name: "apiName", Description: coremsgs.APIParamsContractAPIName},
+	},
+	QueryParams: []*ffapi.QueryParam{
+		{Name: "confirm", Description: coremsgs.APIConfirmQueryParam, IsBool: true},
+	},
+	Description:     coremsgs.APIEndpointsPostContractAPIPublish,
+	JSONInputValue:  func() interface{} { return &core.DefinitionPublish{} },
+	JSONOutputValue: func() interface{} { return &core.ContractAPI{} },
+	JSONOutputCodes: []int{http.StatusAccepted, http.StatusOK},
+	Extensions: &coreExtensions{
+		CoreJSONHandler: func(r *ffapi.APIRequest, cr *coreRequest) (output interface{}, err error) {
+			waitConfirm := strings.EqualFold(r.QP["confirm"], "true")
+			r.SuccessStatus = syncRetcode(waitConfirm)
+			input := r.Input.(*core.DefinitionPublish)
+			return cr.or.DefinitionSender().PublishContractAPI(cr.ctx, cr.apiBaseURL, r.PP["apiName"], input.NetworkName, waitConfirm)
+		},
+	},
+}
diff --git a/internal/apiserver/route_post_contract_api_publish_test.go b/internal/apiserver/route_post_contract_api_publish_test.go
new file mode 100644
index 0000000000..d40529ac94
--- /dev/null
+++ b/internal/apiserver/route_post_contract_api_publish_test.go
@@ -0,0 +1,48 @@
+// Copyright © 2021 Kaleido, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package apiserver + +import ( + "bytes" + "encoding/json" + "net/http/httptest" + "testing" + + "github.com/hyperledger/firefly/mocks/definitionsmocks" + "github.com/hyperledger/firefly/pkg/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestPostContractAPIPublish(t *testing.T) { + o, r := newTestAPIServer() + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + mds := &definitionsmocks.Sender{} + o.On("DefinitionSender").Return(mds) + input := core.DefinitionPublish{NetworkName: "banana-net"} + var buf bytes.Buffer + json.NewEncoder(&buf).Encode(&input) + req := httptest.NewRequest("POST", "/api/v1/namespaces/ns1/apis/banana/publish", &buf) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + api := &core.ContractAPI{} + + mds.On("PublishContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1/namespaces/ns1", "banana", "banana-net", false).Return(api, nil) + r.ServeHTTP(res, req) + + assert.Equal(t, 202, res.Result().StatusCode) +} diff --git a/internal/apiserver/route_post_contract_interface_publish.go b/internal/apiserver/route_post_contract_interface_publish.go new file mode 100644 index 0000000000..1f161f8833 --- /dev/null +++ b/internal/apiserver/route_post_contract_interface_publish.go @@ -0,0 +1,52 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package apiserver + +import ( + "net/http" + "strings" + + "github.com/hyperledger/firefly-common/pkg/ffapi" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/pkg/core" +) + +var postContractInterfacePublish = &ffapi.Route{ + Name: "postContractInterfacePublish", + Path: "contracts/interfaces/{name}/{version}/publish", + Method: http.MethodPost, + PathParams: []*ffapi.PathParam{ + {Name: "name", Description: coremsgs.APIParamsContractInterfaceName}, + {Name: "version", Description: coremsgs.APIParamsContractInterfaceVersion}, + }, + QueryParams: []*ffapi.QueryParam{ + {Name: "confirm", Description: coremsgs.APIConfirmQueryParam, IsBool: true}, + }, + Description: coremsgs.APIEndpointsPostContractInterfacePublish, + JSONInputValue: func() interface{} { return &core.DefinitionPublish{} }, + JSONOutputValue: func() interface{} { return &fftypes.FFI{} }, + JSONOutputCodes: []int{http.StatusAccepted, http.StatusOK}, + Extensions: &coreExtensions{ + CoreJSONHandler: func(r *ffapi.APIRequest, cr *coreRequest) (output interface{}, err error) { + waitConfirm := strings.EqualFold(r.QP["confirm"], "true") + r.SuccessStatus = syncRetcode(waitConfirm) + input := r.Input.(*core.DefinitionPublish) + return cr.or.DefinitionSender().PublishFFI(cr.ctx, r.PP["name"], r.PP["version"], input.NetworkName, waitConfirm) + }, + }, +} diff --git a/internal/apiserver/route_post_contract_interface_publish_test.go b/internal/apiserver/route_post_contract_interface_publish_test.go new file mode 100644 index 0000000000..16f9279c83 --- /dev/null +++ b/internal/apiserver/route_post_contract_interface_publish_test.go @@ -0,0 +1,49 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package apiserver + +import ( + "bytes" + "encoding/json" + "net/http/httptest" + "testing" + + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/mocks/definitionsmocks" + "github.com/hyperledger/firefly/pkg/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestPostContractInterfacePublish(t *testing.T) { + o, r := newTestAPIServer() + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + mds := &definitionsmocks.Sender{} + o.On("DefinitionSender").Return(mds) + input := core.TokenPool{} + var buf bytes.Buffer + json.NewEncoder(&buf).Encode(&input) + req := httptest.NewRequest("POST", "/api/v1/namespaces/ns1/contracts/interfaces/ffi1/1.0/publish", &buf) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + ffi := &fftypes.FFI{} + + mds.On("PublishFFI", mock.Anything, "ffi1", "1.0", "", false).Return(ffi, nil) + r.ServeHTTP(res, req) + + assert.Equal(t, 202, res.Result().StatusCode) +} diff --git a/internal/apiserver/route_post_new_contract_api.go b/internal/apiserver/route_post_new_contract_api.go index 21a6c3526c..e3f076d133 100644 --- a/internal/apiserver/route_post_new_contract_api.go +++ b/internal/apiserver/route_post_new_contract_api.go @@ -33,6 +33,7 @@ var postNewContractAPI = &ffapi.Route{ PathParams: nil, QueryParams: []*ffapi.QueryParam{ {Name: "confirm", Description: coremsgs.APIConfirmQueryParam, IsBool: true, Example: "true"}, + {Name: "publish", Description: coremsgs.APIPublishQueryParam, IsBool: true}, }, Description: coremsgs.APIEndpointsPostNewContractAPI, JSONInputValue: func() interface{} { return &core.ContractAPI{} }, @@ -47,6 +48,7 @@ var postNewContractAPI = &ffapi.Route{ r.SuccessStatus = syncRetcode(waitConfirm) api := r.Input.(*core.ContractAPI) api.ID = nil + api.Published = strings.EqualFold(r.QP["publish"], "true") err = cr.or.DefinitionSender().DefineContractAPI(cr.ctx, cr.apiBaseURL, api, waitConfirm) return api, err }, diff --git a/internal/apiserver/route_post_new_contract_interface.go b/internal/apiserver/route_post_new_contract_interface.go index 1b408739ab..5a02ad8473 100644 --- a/internal/apiserver/route_post_new_contract_interface.go +++ b/internal/apiserver/route_post_new_contract_interface.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -33,6 +33,7 @@ var postNewContractInterface = &ffapi.Route{ PathParams: nil, QueryParams: []*ffapi.QueryParam{ {Name: "confirm", Description: coremsgs.APIConfirmQueryParam, IsBool: true, Example: "true"}, + {Name: "publish", Description: coremsgs.APIPublishQueryParam, IsBool: true}, }, Description: coremsgs.APIEndpointsPostNewContractInterface, JSONInputValue: func() interface{} { return &fftypes.FFI{} }, @@ -46,6 +47,7 @@ var postNewContractInterface = &ffapi.Route{ waitConfirm := strings.EqualFold(r.QP["confirm"], "true") r.SuccessStatus = syncRetcode(waitConfirm) ffi := r.Input.(*fftypes.FFI) + ffi.Published = strings.EqualFold(r.QP["publish"], "true") err = cr.or.DefinitionSender().DefineFFI(cr.ctx, ffi, waitConfirm) return ffi, err }, diff --git a/internal/apiserver/route_post_new_organization.go b/internal/apiserver/route_post_new_organization.go index 1309e17b78..487b656312 100644 --- a/internal/apiserver/route_post_new_organization.go +++ b/internal/apiserver/route_post_new_organization.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -22,6 +22,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/internal/orchestrator" "github.com/hyperledger/firefly/pkg/core" ) @@ -38,6 +39,9 @@ var postNewOrganization = &ffapi.Route{ JSONOutputValue: func() interface{} { return &core.Identity{} }, JSONOutputCodes: []int{http.StatusAccepted, http.StatusOK}, Extensions: &coreExtensions{ + EnabledIf: func(or orchestrator.Orchestrator) bool { + return or.MultiParty() != nil + }, CoreJSONHandler: func(r *ffapi.APIRequest, cr *coreRequest) (output interface{}, err error) { waitConfirm := strings.EqualFold(r.QP["confirm"], "true") r.SuccessStatus = syncRetcode(waitConfirm) diff --git a/internal/apiserver/route_post_new_organization_test.go b/internal/apiserver/route_post_new_organization_test.go index e30912d5c9..c0b11b0e01 100644 --- a/internal/apiserver/route_post_new_organization_test.go +++ b/internal/apiserver/route_post_new_organization_test.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -22,6 +22,7 @@ import ( "net/http/httptest" "testing" + "github.com/hyperledger/firefly/mocks/multipartymocks" "github.com/hyperledger/firefly/mocks/networkmapmocks" "github.com/hyperledger/firefly/pkg/core" "github.com/stretchr/testify/assert" @@ -33,6 +34,7 @@ func TestNewOrganization(t *testing.T) { o.On("Authorize", mock.Anything, mock.Anything).Return(nil) mnm := &networkmapmocks.Manager{} o.On("NetworkMap").Return(mnm) + o.On("MultiParty").Return(&multipartymocks.Manager{}) input := core.Identity{} var buf bytes.Buffer json.NewEncoder(&buf).Encode(&input) diff --git a/internal/apiserver/route_post_token_pool.go b/internal/apiserver/route_post_token_pool.go index 42b4b40b38..e5e20fa15b 100644 --- a/internal/apiserver/route_post_token_pool.go +++ b/internal/apiserver/route_post_token_pool.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -32,6 +32,7 @@ var postTokenPool = &ffapi.Route{ PathParams: nil, QueryParams: []*ffapi.QueryParam{ {Name: "confirm", Description: coremsgs.APIConfirmQueryParam, IsBool: true}, + {Name: "publish", Description: coremsgs.APIPublishQueryParam, IsBool: true}, }, Description: coremsgs.APIEndpointsPostTokenPool, JSONInputValue: func() interface{} { return &core.TokenPoolInput{} }, @@ -41,7 +42,9 @@ var postTokenPool = &ffapi.Route{ CoreJSONHandler: func(r *ffapi.APIRequest, cr *coreRequest) (output interface{}, err error) { waitConfirm := strings.EqualFold(r.QP["confirm"], "true") r.SuccessStatus = syncRetcode(waitConfirm) - return cr.or.Assets().CreateTokenPool(cr.ctx, r.Input.(*core.TokenPoolInput), waitConfirm) + pool := r.Input.(*core.TokenPoolInput) + pool.Published = strings.EqualFold(r.QP["publish"], "true") + return cr.or.Assets().CreateTokenPool(cr.ctx, pool, waitConfirm) }, }, } diff --git a/internal/apiserver/route_post_token_pool_publish.go b/internal/apiserver/route_post_token_pool_publish.go new file mode 100644 index 0000000000..5abd25927e --- /dev/null +++ b/internal/apiserver/route_post_token_pool_publish.go @@ -0,0 +1,50 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apiserver + +import ( + "net/http" + "strings" + + "github.com/hyperledger/firefly-common/pkg/ffapi" + "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/pkg/core" +) + +var postTokenPoolPublish = &ffapi.Route{ + Name: "postTokenPoolPublish", + Path: "tokens/pools/{nameOrId}/publish", + Method: http.MethodPost, + PathParams: []*ffapi.PathParam{ + {Name: "nameOrId", Description: coremsgs.APIParamsTokenPoolNameOrID}, + }, + QueryParams: []*ffapi.QueryParam{ + {Name: "confirm", Description: coremsgs.APIConfirmQueryParam, IsBool: true}, + }, + Description: coremsgs.APIEndpointsPostTokenPoolPublish, + JSONInputValue: func() interface{} { return &core.DefinitionPublish{} }, + JSONOutputValue: func() interface{} { return &core.TokenPool{} }, + JSONOutputCodes: []int{http.StatusAccepted, http.StatusOK}, + Extensions: &coreExtensions{ + CoreJSONHandler: func(r *ffapi.APIRequest, cr *coreRequest) (output interface{}, err error) { + waitConfirm := strings.EqualFold(r.QP["confirm"], "true") + r.SuccessStatus = syncRetcode(waitConfirm) + input := r.Input.(*core.DefinitionPublish) + return cr.or.DefinitionSender().PublishTokenPool(cr.ctx, r.PP["nameOrId"], input.NetworkName, waitConfirm) + }, + }, +} diff --git a/internal/apiserver/route_post_token_pool_publish_test.go b/internal/apiserver/route_post_token_pool_publish_test.go new file mode 100644 index 0000000000..fc9aad0b86 --- /dev/null +++ b/internal/apiserver/route_post_token_pool_publish_test.go @@ -0,0 +1,48 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package apiserver + +import ( + "bytes" + "encoding/json" + "net/http/httptest" + "testing" + + "github.com/hyperledger/firefly/mocks/definitionsmocks" + "github.com/hyperledger/firefly/pkg/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestPostTokenPoolPublish(t *testing.T) { + o, r := newTestAPIServer() + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + mds := &definitionsmocks.Sender{} + o.On("DefinitionSender").Return(mds) + input := core.TokenPool{} + var buf bytes.Buffer + json.NewEncoder(&buf).Encode(&input) + req := httptest.NewRequest("POST", "/api/v1/namespaces/ns1/tokens/pools/pool1/publish", &buf) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + pool := &core.TokenPool{} + + mds.On("PublishTokenPool", mock.Anything, "pool1", "", false).Return(pool, nil) + r.ServeHTTP(res, req) + + assert.Equal(t, 202, res.Result().StatusCode) +} diff --git a/internal/apiserver/routes.go b/internal/apiserver/routes.go index a878504041..58e6e61664 100644 --- a/internal/apiserver/routes.go +++ b/internal/apiserver/routes.go @@ -45,6 +45,7 @@ const ( routeTagNonDefaultNamespace = "Non-Default Namespace" ) +var nsRoutes = []*ffapi.Route{} var routes = append( globalRoutes([]*ffapi.Route{ getNamespace, @@ -52,9 +53,12 @@ var routes = append( getWebSockets, }), namespacedRoutes([]*ffapi.Route{ + deleteContractAPI, + deleteContractInterface, deleteContractListener, deleteData, deleteSubscription, + deleteTokenPool, getBatchByID, getBatches, getBlockchainEventByID, @@ -124,9 +128,11 @@ var routes = append( getVerifiers, patchUpdateIdentity, postContractAPIInvoke, + postContractAPIPublish, postContractAPIQuery, postContractAPIListeners, postContractInterfaceGenerate, + postContractInterfacePublish, postContractDeploy, postContractInvoke, postContractQuery, @@ -152,6 +158,7 @@ var routes = append( postTokenBurn, postTokenMint, postTokenPool, + postTokenPoolPublish, postTokenTransfer, putContractAPI, putSubscription, @@ -171,14 +178,18 @@ func namespacedRoutes(routes []*ffapi.Route) []*ffapi.Route { for i, route := range routes { route.Tag = routeTagDefaultNamespace - routeCopy := *route - routeCopy.Name += "Namespace" - routeCopy.Path = "namespaces/{ns}/" + route.Path - routeCopy.PathParams = append(routeCopy.PathParams, &ffapi.PathParam{ + routeCopy1 := *route + routeCopy1.Name += "Namespace" + routeCopy1.Path = "namespaces/{ns}/" + route.Path + routeCopy1.PathParams = append(routeCopy1.PathParams, &ffapi.PathParam{ Name: "ns", ExampleFromConf: coreconfig.NamespacesDefault, Description: coremsgs.APIParamsNamespace, }) - routeCopy.Tag = routeTagNonDefaultNamespace - newRoutes[i] = &routeCopy + routeCopy1.Tag = routeTagNonDefaultNamespace + newRoutes[i] = &routeCopy1 + + // Build a separate list of NS relative routes, to build a swagger limited to one namespace + routeCopy2 := *route + nsRoutes = append(nsRoutes, &routeCopy2) } return append(routes, newRoutes...) 
} diff --git a/internal/apiserver/server.go b/internal/apiserver/server.go index 343e204789..46a57ba22f 100644 --- a/internal/apiserver/server.go +++ b/internal/apiserver/server.go @@ -18,15 +18,13 @@ package apiserver import ( "context" - "encoding/json" "fmt" "net/http" + "strings" "time" - "github.com/ghodss/yaml" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/getkin/kin-openapi/openapi3" "github.com/gorilla/mux" "github.com/hyperledger/firefly-common/pkg/config" "github.com/hyperledger/firefly-common/pkg/ffapi" @@ -58,10 +56,12 @@ type Server interface { type apiServer struct { // Defaults set with config - apiTimeout time.Duration - apiMaxTimeout time.Duration - metricsEnabled bool - ffiSwaggerGen FFISwaggerGen + apiTimeout time.Duration + apiMaxTimeout time.Duration + metricsEnabled bool + ffiSwaggerGen FFISwaggerGen + apiPublicURL string + dynamicPublicURLHeader string } func InitConfig() { @@ -73,12 +73,15 @@ func InitConfig() { } func NewAPIServer() Server { - return &apiServer{ - apiTimeout: config.GetDuration(coreconfig.APIRequestTimeout), - apiMaxTimeout: config.GetDuration(coreconfig.APIRequestMaxTimeout), - metricsEnabled: config.GetBool(coreconfig.MetricsEnabled), - ffiSwaggerGen: NewFFISwaggerGen(), + as := &apiServer{ + apiTimeout: config.GetDuration(coreconfig.APIRequestTimeout), + apiMaxTimeout: config.GetDuration(coreconfig.APIRequestMaxTimeout), + dynamicPublicURLHeader: config.GetString(coreconfig.APIDynamicPublicURLHeader), + metricsEnabled: config.GetBool(coreconfig.MetricsEnabled), + ffiSwaggerGen: &ffiSwaggerGen{}, } + as.apiPublicURL = as.getPublicURL(apiConfig, "") + return as } // Serve is the main entry point for the API Server @@ -132,7 +135,7 @@ func (as *apiServer) waitForServerStop(httpErrChan, spiErrChan, metricsErrChan c } func (as *apiServer) getPublicURL(conf config.Section, pathPrefix string) string { - publicURL := conf.GetString(httpserver.HTTPConfPublicURL) + publicURL := strings.TrimSuffix(conf.GetString(httpserver.HTTPConfPublicURL), "/") if publicURL == "" { proto := "https" tlsSection := conf.SubSection("tls") @@ -147,71 +150,6 @@ func (as *apiServer) getPublicURL(conf config.Section, pathPrefix string) string return publicURL } -func (as *apiServer) swaggerGenConf(apiBaseURL string) *ffapi.Options { - return &ffapi.Options{ - BaseURL: apiBaseURL, - Title: "FireFly", - Version: "1.0", - PanicOnMissingDescription: config.GetBool(coreconfig.APIOASPanicOnMissingDescription), - DefaultRequestTimeout: config.GetDuration(coreconfig.APIRequestTimeout), - APIDefaultFilterLimit: config.GetString(coreconfig.APIDefaultFilterLimit), - APIMaxFilterLimit: config.GetUint(coreconfig.APIMaxFilterLimit), - APIMaxFilterSkip: config.GetUint(coreconfig.APIMaxFilterSkip), - } -} - -func (as *apiServer) swaggerHandler(generator func(req *http.Request) (*openapi3.T, error)) func(res http.ResponseWriter, req *http.Request) (status int, err error) { - return func(res http.ResponseWriter, req *http.Request) (status int, err error) { - vars := mux.Vars(req) - doc, err := generator(req) - if err != nil { - return 500, err - } - if vars["ext"] == ".json" { - res.Header().Add("Content-Type", "application/json") - b, _ := json.Marshal(&doc) - _, _ = res.Write(b) - } else { - res.Header().Add("Content-Type", "application/x-yaml") - b, _ := yaml.Marshal(&doc) - _, _ = res.Write(b) - } - return 200, nil - } -} - -func (as *apiServer) swaggerGenerator(routes []*ffapi.Route, apiBaseURL string) func(req *http.Request) (*openapi3.T, error) { - swg := 
ffapi.NewSwaggerGen(as.swaggerGenConf(apiBaseURL)) - return func(req *http.Request) (*openapi3.T, error) { - return swg.Generate(req.Context(), routes), nil - } -} - -func (as *apiServer) contractSwaggerGenerator(mgr namespace.Manager, apiBaseURL string) func(req *http.Request) (*openapi3.T, error) { - return func(req *http.Request) (*openapi3.T, error) { - vars := mux.Vars(req) - or, err := mgr.Orchestrator(req.Context(), vars["ns"], false) - if err != nil { - return nil, err - } - cm := or.Contracts() - api, err := cm.GetContractAPI(req.Context(), apiBaseURL, vars["apiName"]) - if err != nil { - return nil, err - } else if api == nil || api.Interface == nil { - return nil, i18n.NewError(req.Context(), coremsgs.Msg404NoResult) - } - - ffi, err := cm.GetFFIByIDWithChildren(req.Context(), api.Interface.ID) - if err != nil { - return nil, err - } - - baseURL := fmt.Sprintf("%s/namespaces/%s/apis/%s", apiBaseURL, vars["ns"], vars["apiName"]) - return as.ffiSwaggerGen.Generate(req.Context(), baseURL, api, ffi), nil - } -} - func getOrchestrator(ctx context.Context, mgr namespace.Manager, tag string, r *ffapi.APIRequest) (or orchestrator.Orchestrator, err error) { switch tag { case routeTagDefaultNamespace: @@ -227,7 +165,35 @@ func getOrchestrator(ctx context.Context, mgr namespace.Manager, tag string, r * return nil, i18n.NewError(ctx, coremsgs.MsgMissingNamespace) } -func (as *apiServer) routeHandler(hf *ffapi.HandlerFactory, mgr namespace.Manager, apiBaseURL string, route *ffapi.Route) http.HandlerFunc { +func (as *apiServer) baseSwaggerGenOptions() ffapi.SwaggerGenOptions { + return ffapi.SwaggerGenOptions{ + Title: "Hyperledger FireFly", + Version: "1.0", + PanicOnMissingDescription: config.GetBool(coreconfig.APIOASPanicOnMissingDescription), + DefaultRequestTimeout: config.GetDuration(coreconfig.APIRequestTimeout), + APIDefaultFilterLimit: config.GetString(coreconfig.APIDefaultFilterLimit), + APIMaxFilterLimit: config.GetUint(coreconfig.APIMaxFilterLimit), + APIMaxFilterSkip: config.GetUint(coreconfig.APIMaxFilterSkip), + } +} + +func (as *apiServer) getBaseURL(req *http.Request) string { + var baseURL string + if as.dynamicPublicURLHeader != "" { + baseURL = req.Header.Get(as.dynamicPublicURLHeader) + if baseURL != "" { + return baseURL + } + } + baseURL = strings.TrimSuffix(as.apiPublicURL, "/") + "/api/v1" + vars := mux.Vars(req) + if ns, ok := vars["ns"]; ok && ns != "" { + baseURL += `/namespaces/` + ns + } + return baseURL +} + +func (as *apiServer) routeHandler(hf *ffapi.HandlerFactory, mgr namespace.Manager, fixedBaseURL string, route *ffapi.Route) http.HandlerFunc { // We extend the base ffapi functionality, with standardized DB filter support for all core resources. 
// We also pass the Orchestrator context through ce := route.Extensions.(*coreExtensions) @@ -253,6 +219,10 @@ func (as *apiServer) routeHandler(hf *ffapi.HandlerFactory, mgr namespace.Manage return nil, i18n.NewError(r.Req.Context(), coremsgs.MsgActionNotSupported) } + apiBaseURL := fixedBaseURL // for SPI + if apiBaseURL == "" { + apiBaseURL = as.getBaseURL(r.Req) + } cr := &coreRequest{ mgr: mgr, or: or, @@ -271,6 +241,10 @@ func (as *apiServer) routeHandler(hf *ffapi.HandlerFactory, mgr namespace.Manage return nil, i18n.NewError(r.Req.Context(), coremsgs.MsgActionNotSupported) } + apiBaseURL := fixedBaseURL // for SPI + if apiBaseURL == "" { + apiBaseURL = as.getBaseURL(r.Req) + } cr := &coreRequest{ mgr: mgr, or: or, @@ -294,6 +268,74 @@ func (as *apiServer) handlerFactory() *ffapi.HandlerFactory { } } +// For namespace relative APIs, we add the resolved namespace to the public URL of the swagger +// generator (so it can be fully replaced by the API Gateway routing HTTP Header), +// and generate the API itself as if it's root is at the namespace level. +// +// This gives a clean namespace scoped swagger for apps interested in just working with +// a single namespace. +func (as *apiServer) nsOpenAPIHandlerFactory(req *http.Request, publicURL string) *ffapi.OpenAPIHandlerFactory { + vars := mux.Vars(req) + return &ffapi.OpenAPIHandlerFactory{ + BaseSwaggerGenOptions: as.baseSwaggerGenOptions(), + StaticPublicURL: publicURL + "/api/v1/namespaces/" + vars["ns"], + DynamicPublicURLHeader: as.dynamicPublicURLHeader, + } +} + +func (as *apiServer) namespacedSwaggerHandler(hf *ffapi.HandlerFactory, r *mux.Router, publicURL, relativePath string, format ffapi.OpenAPIFormat) { + r.HandleFunc(`/api/v1/namespaces/{ns}`+relativePath, hf.APIWrapper(func(res http.ResponseWriter, req *http.Request) (status int, err error) { + return as.nsOpenAPIHandlerFactory(req, publicURL).OpenAPIHandler("", ffapi.OpenAPIFormatJSON, nsRoutes)(res, req) + })) +} + +func (as *apiServer) namespacedSwaggerUI(hf *ffapi.HandlerFactory, r *mux.Router, publicURL, relativePath string) { + r.HandleFunc(`/api/v1/namespaces/{ns}`+relativePath, hf.APIWrapper(func(res http.ResponseWriter, req *http.Request) (status int, err error) { + return as.nsOpenAPIHandlerFactory(req, publicURL).SwaggerUIHandler(`/api/openapi.yaml`)(res, req) + })) +} + +func (as *apiServer) namespacedContractSwaggerGenerator(hf *ffapi.HandlerFactory, r *mux.Router, mgr namespace.Manager, publicURL, relativePath string, format ffapi.OpenAPIFormat) { + r.HandleFunc(`/api/v1/namespaces/{ns}/apis/{apiName}`+relativePath, hf.APIWrapper(func(res http.ResponseWriter, req *http.Request) (status int, err error) { + vars := mux.Vars(req) + or, err := mgr.Orchestrator(req.Context(), vars["ns"], false) + if err != nil { + return -1, err + } + apiBaseURL := as.getBaseURL(req) + cm := or.Contracts() + api, err := cm.GetContractAPI(req.Context(), apiBaseURL, vars["apiName"]) + if err != nil { + return -1, err + } else if api == nil || api.Interface == nil { + return -1, i18n.NewError(req.Context(), coremsgs.Msg404NoResult) + } + + ffi, err := cm.GetFFIByIDWithChildren(req.Context(), api.Interface.ID) + if err != nil { + return -1, err + } + + options, routes := as.ffiSwaggerGen.Build(req.Context(), api, ffi) + return (&ffapi.OpenAPIHandlerFactory{ + BaseSwaggerGenOptions: *options, + StaticPublicURL: apiBaseURL, + DynamicPublicURLHeader: as.dynamicPublicURLHeader, + }).OpenAPIHandler(fmt.Sprintf("/apis/%s", vars["apiName"]), format, routes)(res, req) + })) +} + +func (as 
*apiServer) namespacedContractSwaggerUI(hf *ffapi.HandlerFactory, r *mux.Router, publicURL, relativePath string) { + r.HandleFunc(`/api/v1/namespaces/{ns}/apis/{apiName}`+relativePath, hf.APIWrapper(func(res http.ResponseWriter, req *http.Request) (status int, err error) { + vars := mux.Vars(req) + oaf := &ffapi.OpenAPIHandlerFactory{ + StaticPublicURL: publicURL + "/api/v1/namespaces/" + vars["ns"], + DynamicPublicURLHeader: as.dynamicPublicURLHeader, + } + return oaf.SwaggerUIHandler(`/apis/`+vars["apiName"]+`/api/openapi.yaml`)(res, req) + })) +} + func (as *apiServer) createMuxRouter(ctx context.Context, mgr namespace.Manager) *mux.Router { r := mux.NewRouter() hf := as.handlerFactory() @@ -302,32 +344,50 @@ func (as *apiServer) createMuxRouter(ctx context.Context, mgr namespace.Manager) r.Use(metrics.GetRestServerInstrumentation().Middleware) } - publicURL := as.getPublicURL(apiConfig, "") - apiBaseURL := fmt.Sprintf("%s/api/v1", publicURL) for _, route := range routes { if ce, ok := route.Extensions.(*coreExtensions); ok { if ce.CoreJSONHandler != nil { - r.HandleFunc(fmt.Sprintf("/api/v1/%s", route.Path), as.routeHandler(hf, mgr, apiBaseURL, route)). + r.HandleFunc(fmt.Sprintf("/api/v1/%s", route.Path), as.routeHandler(hf, mgr, "", route)). Methods(route.Method) } } } - r.HandleFunc(`/api/v1/namespaces/{ns}/apis/{apiName}/api/swagger{ext:\.yaml|\.json|}`, hf.APIWrapper(as.swaggerHandler(as.contractSwaggerGenerator(mgr, apiBaseURL)))) - r.HandleFunc(`/api/v1/namespaces/{ns}/apis/{apiName}/api`, func(rw http.ResponseWriter, req *http.Request) { - url := req.URL.String() + "/swagger.yaml" - handler := hf.APIWrapper(hf.SwaggerUIHandler(url)) - handler(rw, req) - }) + // Swagger builder for the root + oaf := &ffapi.OpenAPIHandlerFactory{ + BaseSwaggerGenOptions: as.baseSwaggerGenOptions(), + StaticPublicURL: as.apiPublicURL, + DynamicPublicURLHeader: as.dynamicPublicURLHeader, + } + + // Root APIs + r.HandleFunc(`/api/swagger.json`, hf.APIWrapper(oaf.OpenAPIHandler(`/api/v1`, ffapi.OpenAPIFormatJSON, routes))) + r.HandleFunc(`/api/openapi.json`, hf.APIWrapper(oaf.OpenAPIHandler(`/api/v1`, ffapi.OpenAPIFormatJSON, routes))) + r.HandleFunc(`/api/swagger.yaml`, hf.APIWrapper(oaf.OpenAPIHandler(`/api/v1`, ffapi.OpenAPIFormatYAML, routes))) + r.HandleFunc(`/api/openapi.yaml`, hf.APIWrapper(oaf.OpenAPIHandler(`/api/v1`, ffapi.OpenAPIFormatYAML, routes))) + r.HandleFunc(`/api`, hf.APIWrapper(oaf.SwaggerUIHandler(`/api/openapi.yaml`))) + // Namespace relative APIs + as.namespacedSwaggerHandler(hf, r, as.apiPublicURL, `/api/swagger.json`, ffapi.OpenAPIFormatJSON) + as.namespacedSwaggerHandler(hf, r, as.apiPublicURL, `/api/openapi.json`, ffapi.OpenAPIFormatJSON) + as.namespacedSwaggerHandler(hf, r, as.apiPublicURL, `/api/swagger.yaml`, ffapi.OpenAPIFormatYAML) + as.namespacedSwaggerHandler(hf, r, as.apiPublicURL, `/api/openapi.yaml`, ffapi.OpenAPIFormatYAML) + as.namespacedSwaggerUI(hf, r, as.apiPublicURL, `/api`) + // Dynamic swagger for namespaced contract APIs + as.namespacedContractSwaggerGenerator(hf, r, mgr, as.apiPublicURL, `/api/swagger.json`, ffapi.OpenAPIFormatJSON) + as.namespacedContractSwaggerGenerator(hf, r, mgr, as.apiPublicURL, `/api/openapi.json`, ffapi.OpenAPIFormatJSON) + as.namespacedContractSwaggerGenerator(hf, r, mgr, as.apiPublicURL, `/api/swagger.yaml`, ffapi.OpenAPIFormatYAML) + as.namespacedContractSwaggerGenerator(hf, r, mgr, as.apiPublicURL, `/api/openapi.yaml`, ffapi.OpenAPIFormatYAML) + as.namespacedContractSwaggerUI(hf, r, as.apiPublicURL, `/api`) - 
r.HandleFunc(`/api/swagger{ext:\.yaml|\.json|}`, hf.APIWrapper(as.swaggerHandler(as.swaggerGenerator(routes, apiBaseURL)))) - r.HandleFunc(`/api`, hf.APIWrapper(hf.SwaggerUIHandler(publicURL+"/api/swagger.yaml"))) r.HandleFunc(`/favicon{any:.*}.png`, favIcons) ws, _ := eifactory.GetPlugin(ctx, "websockets") ws.(*websockets.WebSockets).SetAuthorizer(mgr) r.HandleFunc(`/ws`, ws.(*websockets.WebSockets).ServeHTTP) + // namespace scoped web sockets + r.HandleFunc("/api/v1/namespaces/{ns}/ws", hf.APIWrapper(getNamespacedWebSocketHandler(ws.(*websockets.WebSockets), mgr))) + uiPath := config.GetString(coreconfig.UIPath) if uiPath != "" && config.GetBool(coreconfig.UIEnabled) { r.PathPrefix(`/ui`).Handler(newStaticHandler(uiPath, "index.html", `/ui`)) @@ -337,6 +397,23 @@ func (as *apiServer) createMuxRouter(ctx context.Context, mgr namespace.Manager) return r } +func getNamespacedWebSocketHandler(ws websockets.WebSocketsNamespaced, mgr namespace.Manager) ffapi.HandlerFunction { + return func(res http.ResponseWriter, req *http.Request) (status int, err error) { + + vars := mux.Vars(req) + namespace := vars["ns"] + or, err := mgr.Orchestrator(req.Context(), namespace, false) + if err != nil || or == nil { + return 404, i18n.NewError(req.Context(), coremsgs.Msg404NotFound) + } + + ws.ServeHTTPNamespaced(namespace, res, req) + + return 200, nil + } + +} + func (as *apiServer) notFoundHandler(res http.ResponseWriter, req *http.Request) (status int, err error) { res.Header().Add("Content-Type", "application/json") return 404, i18n.NewError(req.Context(), coremsgs.Msg404NotFound) @@ -357,17 +434,28 @@ func (as *apiServer) createAdminMuxRouter(mgr namespace.Manager) *mux.Router { hf := as.handlerFactory() publicURL := as.getPublicURL(spiConfig, "spi") - apiBaseURL := fmt.Sprintf("%s/v1", publicURL) + spiBaseURL := fmt.Sprintf("%s/v1", publicURL) for _, route := range spiRoutes { if ce, ok := route.Extensions.(*coreExtensions); ok { if ce.CoreJSONHandler != nil { - r.HandleFunc(fmt.Sprintf("/spi/v1/%s", route.Path), as.routeHandler(hf, mgr, apiBaseURL, route)). + r.HandleFunc(fmt.Sprintf("/spi/v1/%s", route.Path), as.routeHandler(hf, mgr, spiBaseURL, route)). 
Methods(route.Method) } } } - r.HandleFunc(`/spi/swagger{ext:\.yaml|\.json|}`, hf.APIWrapper(as.swaggerHandler(as.swaggerGenerator(spiRoutes, apiBaseURL)))) - r.HandleFunc(`/spi`, hf.APIWrapper(hf.SwaggerUIHandler(publicURL+"/swagger.yaml"))) + + // Swagger for SPI + oaf := &ffapi.OpenAPIHandlerFactory{ + BaseSwaggerGenOptions: as.baseSwaggerGenOptions(), + StaticPublicURL: publicURL, + DynamicPublicURLHeader: as.dynamicPublicURLHeader, + } + r.HandleFunc(`/spi/swagger.json`, hf.APIWrapper(oaf.OpenAPIHandler(`/spi/v1`, ffapi.OpenAPIFormatJSON, spiRoutes))) + r.HandleFunc(`/spi/openapi.json`, hf.APIWrapper(oaf.OpenAPIHandler(`/spi/v1`, ffapi.OpenAPIFormatJSON, spiRoutes))) + r.HandleFunc(`/spi/swagger.yaml`, hf.APIWrapper(oaf.OpenAPIHandler(`/spi/v1`, ffapi.OpenAPIFormatYAML, spiRoutes))) + r.HandleFunc(`/spi/openapi.yaml`, hf.APIWrapper(oaf.OpenAPIHandler(`/spi/v1`, ffapi.OpenAPIFormatYAML, spiRoutes))) + r.HandleFunc(`/spi`, hf.APIWrapper(oaf.SwaggerUIHandler(`/spi/openapi.yaml`))) + r.HandleFunc(`/favicon{any:.*}.png`, favIcons) r.HandleFunc(`/spi/ws`, as.spiWSHandler(mgr)) diff --git a/internal/apiserver/server_test.go b/internal/apiserver/server_test.go index 773cdf0218..8d61e03c07 100644 --- a/internal/apiserver/server_test.go +++ b/internal/apiserver/server_test.go @@ -20,17 +20,19 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" - "io/ioutil" + "io" "mime/multipart" "net/http" "net/http/httptest" "testing" - "time" "github.com/getkin/kin-openapi/openapi3" + "github.com/go-resty/resty/v2" "github.com/gorilla/mux" "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/httpserver" "github.com/hyperledger/firefly-common/pkg/i18n" @@ -42,6 +44,7 @@ import ( "github.com/hyperledger/firefly/mocks/namespacemocks" "github.com/hyperledger/firefly/mocks/orchestratormocks" "github.com/hyperledger/firefly/mocks/spieventsmocks" + "github.com/hyperledger/firefly/mocks/websocketsmocks" "github.com/hyperledger/firefly/pkg/core" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -58,10 +61,7 @@ func newTestServer() (*namespacemocks.Manager, *orchestratormocks.Orchestrator, mgr.On("Orchestrator", mock.Anything, "mynamespace", false).Return(o, nil).Maybe() mgr.On("Orchestrator", mock.Anything, "ns1", false).Return(o, nil).Maybe() config.Set(coreconfig.APIMaxFilterLimit, 100) - as := &apiServer{ - apiTimeout: 5 * time.Second, - ffiSwaggerGen: &apiservermocks.FFISwaggerGen{}, - } + as := NewAPIServer().(*apiServer) return mgr, o, as } @@ -224,36 +224,68 @@ func TestUnauthorized(t *testing.T) { assert.Regexp(t, "FF00169", resJSON["error"]) } -func TestSwaggerYAML(t *testing.T) { - _, _, as := newTestServer() - handler := as.handlerFactory().APIWrapper(as.swaggerHandler(as.swaggerGenerator(routes, "http://localhost:12345/api/v1"))) - s := httptest.NewServer(http.HandlerFunc(handler)) +func TestSwaggerJSON(t *testing.T) { + o, r := newTestAPIServer() + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + s := httptest.NewServer(r) defer s.Close() - res, err := http.Get(fmt.Sprintf("http://%s/api/swagger.yaml", s.Listener.Addr())) + res, err := http.Get(fmt.Sprintf("http://%s/api/swagger.json", s.Listener.Addr())) assert.NoError(t, err) assert.Equal(t, 200, res.StatusCode) - b, _ := ioutil.ReadAll(res.Body) - doc, err := openapi3.NewLoader().LoadFromData(b) - assert.NoError(t, err) - err = 
doc.Validate(context.Background()) + b, _ := io.ReadAll(res.Body) + err = json.Unmarshal(b, &openapi3.T{}) assert.NoError(t, err) } -func TestSwaggerJSON(t *testing.T) { +func TestNamespacedSwaggerJSON(t *testing.T) { o, r := newTestAPIServer() o.On("Authorize", mock.Anything, mock.Anything).Return(nil) s := httptest.NewServer(r) defer s.Close() - res, err := http.Get(fmt.Sprintf("http://%s/api/swagger.json", s.Listener.Addr())) + res, err := http.Get(fmt.Sprintf("http://%s/api/v1/namespaces/test/api/swagger.json", s.Listener.Addr())) assert.NoError(t, err) assert.Equal(t, 200, res.StatusCode) - b, _ := ioutil.ReadAll(res.Body) + b, _ := io.ReadAll(res.Body) err = json.Unmarshal(b, &openapi3.T{}) assert.NoError(t, err) } +func TestNamespacedSwaggerUI(t *testing.T) { + mgr, o, as := newTestServer() + r := as.createMuxRouter(context.Background(), mgr) + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + s := httptest.NewServer(r) + defer s.Close() + + res, err := resty.New().R(). + SetDoNotParseResponse(true). + Get(fmt.Sprintf("http://%s/api/v1/namespaces/test/api", s.Listener.Addr())) + assert.NoError(t, err) + assert.Equal(t, 200, res.StatusCode()) + b, _ := io.ReadAll(res.RawBody()) + assert.Contains(t, string(b), "http://127.0.0.1:5000/api/v1/namespaces/test/api/openapi.yaml") +} + +func TestNamespacedSwaggerUIRewrite(t *testing.T) { + mgr, o, as := newTestServer() + r := as.createMuxRouter(context.Background(), mgr) + o.On("Authorize", mock.Anything, mock.Anything).Return(nil) + s := httptest.NewServer(r) + defer s.Close() + as.dynamicPublicURLHeader = "X-API-Rewrite" + + res, err := resty.New().R(). + SetDoNotParseResponse(true). + SetHeader("X-API-Rewrite", "https://myhost.example.com/mypath/to/namespace/"). + Get(fmt.Sprintf("http://%s/api/v1/namespaces/test/api", s.Listener.Addr())) + assert.NoError(t, err) + assert.Equal(t, 200, res.StatusCode()) + b, _ := io.ReadAll(res.RawBody()) + assert.Contains(t, string(b), "https://myhost.example.com/mypath/to/namespace/api/openapi.yaml") +} + func TestSwaggerAdminJSON(t *testing.T) { _, r := newTestSPIServer() s := httptest.NewServer(r) @@ -262,7 +294,7 @@ func TestSwaggerAdminJSON(t *testing.T) { res, err := http.Get(fmt.Sprintf("http://%s/spi/swagger.json", s.Listener.Addr())) assert.NoError(t, err) assert.Equal(t, 200, res.StatusCode) - b, _ := ioutil.ReadAll(res.Body) + b, _ := io.ReadAll(res.Body) err = json.Unmarshal(b, &openapi3.T{}) assert.NoError(t, err) } @@ -292,7 +324,8 @@ func TestContractAPISwaggerJSON(t *testing.T) { r := as.createMuxRouter(context.Background(), mgr) mcm := &contractmocks.Manager{} o.On("Contracts").Return(mcm) - mffi := as.ffiSwaggerGen.(*apiservermocks.FFISwaggerGen) + mffi := apiservermocks.NewFFISwaggerGen(t) + as.ffiSwaggerGen = mffi s := httptest.NewServer(r) defer s.Close() @@ -302,14 +335,17 @@ func TestContractAPISwaggerJSON(t *testing.T) { ID: fftypes.NewUUID(), }, } + as.dynamicPublicURLHeader = "X-API-BaseURL" - mcm.On("GetContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1", "my-api").Return(api, nil) + mcm.On("GetContractAPI", mock.Anything, "http://mydomain.com/path/to/default", "my-api").Return(api, nil) mcm.On("GetFFIByIDWithChildren", mock.Anything, api.Interface.ID).Return(ffi, nil) - mffi.On("Generate", mock.Anything, "http://127.0.0.1:5000/api/v1/namespaces/default/apis/my-api", api, ffi).Return(&openapi3.T{}) + mffi.On("Build", mock.Anything, api, ffi).Return(&ffapi.SwaggerGenOptions{}, []*ffapi.Route{}) - res, err := 
http.Get(fmt.Sprintf("http://%s/api/v1/namespaces/default/apis/my-api/api/swagger.json", s.Listener.Addr())) + res, err := resty.New().R(). + SetHeader("X-API-BaseURL", "http://mydomain.com/path/to/default"). + Get(fmt.Sprintf("http://%s/api/v1/namespaces/default/apis/my-api/api/swagger.json", s.Listener.Addr())) assert.NoError(t, err) - assert.Equal(t, 200, res.StatusCode) + assert.Equal(t, 200, res.StatusCode()) } func TestContractAPISwaggerJSONGetAPIFail(t *testing.T) { @@ -320,7 +356,7 @@ func TestContractAPISwaggerJSONGetAPIFail(t *testing.T) { s := httptest.NewServer(r) defer s.Close() - mcm.On("GetContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1", "my-api").Return(nil, fmt.Errorf("pop")) + mcm.On("GetContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1/namespaces/default", "my-api").Return(nil, fmt.Errorf("pop")) res, err := http.Get(fmt.Sprintf("http://%s/api/v1/namespaces/default/apis/my-api/api/swagger.json", s.Listener.Addr())) assert.NoError(t, err) @@ -335,7 +371,7 @@ func TestContractAPISwaggerJSONGetAPINotFound(t *testing.T) { s := httptest.NewServer(r) defer s.Close() - mcm.On("GetContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1", "my-api").Return(nil, nil) + mcm.On("GetContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1/namespaces/default", "my-api").Return(nil, nil) res, err := http.Get(fmt.Sprintf("http://%s/api/v1/namespaces/default/apis/my-api/api/swagger.json", s.Listener.Addr())) assert.NoError(t, err) @@ -356,7 +392,7 @@ func TestContractAPISwaggerJSONGetFFIFail(t *testing.T) { }, } - mcm.On("GetContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1", "my-api").Return(api, nil) + mcm.On("GetContractAPI", mock.Anything, "http://127.0.0.1:5000/api/v1/namespaces/default", "my-api").Return(api, nil) mcm.On("GetFFIByIDWithChildren", mock.Anything, api.Interface.ID).Return(nil, fmt.Errorf("pop")) res, err := http.Get(fmt.Sprintf("http://%s/api/v1/namespaces/default/apis/my-api/api/swagger.json", s.Listener.Addr())) @@ -388,7 +424,7 @@ func TestContractAPISwaggerUI(t *testing.T) { res, err := http.Get(fmt.Sprintf("http://%s/api/v1/namespaces/default/apis/my-api/api", s.Listener.Addr())) assert.NoError(t, err) assert.Equal(t, 200, res.StatusCode) - b, _ := ioutil.ReadAll(res.Body) + b, _ := io.ReadAll(res.Body) assert.Regexp(t, "html", string(b)) } @@ -479,3 +515,37 @@ func TestGetOrchestratorMissingTag(t *testing.T) { _, err := getOrchestrator(context.Background(), &namespacemocks.Manager{}, "", nil) assert.Regexp(t, "FF10437", err) } + +func TestGetNamespacedWebSocketHandler(t *testing.T) { + mgr, _, _ := newTestServer() + mwsns := &websocketsmocks.WebSocketsNamespaced{} + mwsns.On("ServeHTTPNamespaced", "ns1", mock.Anything, mock.Anything).Return() + + var b bytes.Buffer + req := httptest.NewRequest("GET", "/api/v1/namespaces/ns1/ws", &b) + req = mux.SetURLVars(req, map[string]string{"ns": "ns1"}) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + handler := getNamespacedWebSocketHandler(mwsns, mgr) + status, err := handler(res, req) + assert.NoError(t, err) + assert.Equal(t, 200, status) +} + +func TestGetNamespacedWebSocketHandlerUnknownNamespace(t *testing.T) { + mgr, _, _ := newTestServer() + mwsns := &websocketsmocks.WebSocketsNamespaced{} + + mgr.On("Orchestrator", mock.Anything, "unknown", false).Return(nil, errors.New("unknown namespace")).Maybe() + var b bytes.Buffer + req := httptest.NewRequest("GET", "/api/v1/namespaces/unknown/ws", &b) + req = mux.SetURLVars(req, 
map[string]string{"ns": "unknown"}) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + handler := getNamespacedWebSocketHandler(mwsns, mgr) + status, err := handler(res, req) + assert.Error(t, err) + assert.Equal(t, 404, status) +} diff --git a/internal/apiserver/spi_routes.go b/internal/apiserver/spi_routes.go index ef5f7963c8..ff22f21c17 100644 --- a/internal/apiserver/spi_routes.go +++ b/internal/apiserver/spi_routes.go @@ -16,7 +16,11 @@ package apiserver -import "github.com/hyperledger/firefly-common/pkg/ffapi" +import ( + "github.com/hyperledger/firefly-common/pkg/ffapi" + "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/internal/coremsgs" +) // The Service Provider Interface (SPI) allows external microservices (such as the FireFly Transaction Manager) // to act as augmented components to the core. @@ -27,7 +31,24 @@ var spiRoutes = append(globalRoutes([]*ffapi.Route{ spiPatchOpByID, spiPostReset, }), - namespacedRoutes([]*ffapi.Route{ + namespacedSPIRoutes([]*ffapi.Route{ spiGetOps, })..., ) + +func namespacedSPIRoutes(routes []*ffapi.Route) []*ffapi.Route { + newRoutes := make([]*ffapi.Route, len(routes)) + for i, route := range routes { + route.Tag = routeTagDefaultNamespace + + routeCopy := *route + routeCopy.Name += "Namespace" + routeCopy.Path = "namespaces/{ns}/" + route.Path + routeCopy.PathParams = append(routeCopy.PathParams, &ffapi.PathParam{ + Name: "ns", ExampleFromConf: coreconfig.NamespacesDefault, Description: coremsgs.APIParamsNamespace, + }) + routeCopy.Tag = routeTagNonDefaultNamespace + newRoutes[i] = &routeCopy + } + return append(routes, newRoutes...) +} diff --git a/internal/apiserver/swagger_check_test.go b/internal/apiserver/swagger_check_test.go index fb444326dd..10e3dc67e1 100644 --- a/internal/apiserver/swagger_check_test.go +++ b/internal/apiserver/swagger_check_test.go @@ -22,7 +22,6 @@ package apiserver import ( "context" "crypto/sha1" - "fmt" "io/ioutil" "net/http" "net/http/httptest" @@ -32,6 +31,7 @@ import ( "github.com/getkin/kin-openapi/openapi3" "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly/internal/coreconfig" "github.com/stretchr/testify/assert" @@ -42,11 +42,14 @@ func TestDiffSwaggerYAML(t *testing.T) { config.Set(coreconfig.APIOASPanicOnMissingDescription, true) as := &apiServer{} hf := as.handlerFactory() - handler := hf.APIWrapper(as.swaggerHandler(as.swaggerGenerator(routes, "http://localhost:5000"))) - s := httptest.NewServer(http.HandlerFunc(handler)) + handler := &ffapi.OpenAPIHandlerFactory{ + BaseSwaggerGenOptions: as.baseSwaggerGenOptions(), + StaticPublicURL: "http://localhost:5000", + } + s := httptest.NewServer(http.HandlerFunc(hf.APIWrapper(handler.OpenAPIHandler("/api/v1", ffapi.OpenAPIFormatYAML, routes)))) defer s.Close() - res, err := http.Get(fmt.Sprintf("http://%s/api/swagger.yaml", s.Listener.Addr())) + res, err := http.Get(s.URL) assert.NoError(t, err) assert.Equal(t, 200, res.StatusCode) b, _ := ioutil.ReadAll(res.Body) diff --git a/internal/apiserver/swagger_generate_test.go b/internal/apiserver/swagger_generate_test.go index bd03ce5ff9..ce5171ecf0 100644 --- a/internal/apiserver/swagger_generate_test.go +++ b/internal/apiserver/swagger_generate_test.go @@ -31,6 +31,7 @@ import ( "github.com/getkin/kin-openapi/openapi3" "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/ffapi" 
"github.com/hyperledger/firefly/internal/coreconfig" "github.com/stretchr/testify/assert" ) @@ -40,8 +41,11 @@ func TestDownloadSwaggerYAML(t *testing.T) { config.Set(coreconfig.APIOASPanicOnMissingDescription, true) as := &apiServer{} hf := as.handlerFactory() - handler := hf.APIWrapper(as.swaggerHandler(as.swaggerGenerator(routes, "http://localhost:5000"))) - s := httptest.NewServer(http.HandlerFunc(handler)) + handler := &ffapi.OpenAPIHandlerFactory{ + BaseSwaggerGenOptions: as.baseSwaggerGenOptions(), + StaticPublicURL: "http://localhost:5000", + } + s := httptest.NewServer(http.HandlerFunc(hf.APIWrapper(handler.OpenAPIHandler("/api/v1", ffapi.OpenAPIFormatYAML, routes)))) defer s.Close() res, err := http.Get(fmt.Sprintf("http://%s/api/swagger.yaml", s.Listener.Addr())) diff --git a/internal/assets/manager.go b/internal/assets/manager.go index b16ddb6066..77ceaf8248 100644 --- a/internal/assets/manager.go +++ b/internal/assets/manager.go @@ -23,7 +23,9 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly/internal/broadcast" + "github.com/hyperledger/firefly/internal/cache" "github.com/hyperledger/firefly/internal/contracts" + "github.com/hyperledger/firefly/internal/coreconfig" "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/internal/identity" "github.com/hyperledger/firefly/internal/metrics" @@ -42,9 +44,11 @@ type Manager interface { CreateTokenPool(ctx context.Context, pool *core.TokenPoolInput, waitConfirm bool) (*core.TokenPool, error) ActivateTokenPool(ctx context.Context, pool *core.TokenPool) error GetTokenPools(ctx context.Context, filter ffapi.AndFilter) ([]*core.TokenPool, *ffapi.FilterResult, error) - GetTokenPool(ctx context.Context, connector, poolName string) (*core.TokenPool, error) + GetTokenPoolByLocator(ctx context.Context, connector, poolLocator string) (*core.TokenPool, error) GetTokenPoolByNameOrID(ctx context.Context, poolNameOrID string) (*core.TokenPool, error) + GetTokenPoolByID(ctx context.Context, id *fftypes.UUID) (*core.TokenPool, error) ResolvePoolMethods(ctx context.Context, pool *core.TokenPool) error + DeleteTokenPool(ctx context.Context, poolNameOrID string) error GetTokenBalances(ctx context.Context, filter ffapi.AndFilter) ([]*core.TokenBalance, *ffapi.FilterResult, error) GetTokenAccounts(ctx context.Context, filter ffapi.AndFilter) ([]*core.TokenAccount, *ffapi.FilterResult, error) @@ -66,7 +70,7 @@ type Manager interface { // From operations.OperationHandler PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) - RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) + RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) } type assetManager struct { @@ -82,13 +86,15 @@ type assetManager struct { metrics metrics.Manager operations operations.Manager contracts contracts.Manager + cache cache.CInterface keyNormalization int } -func NewAssetManager(ctx context.Context, ns, keyNormalization string, di database.Plugin, ti map[string]tokens.Plugin, im identity.Manager, sa syncasync.Bridge, bm broadcast.Manager, pm privatemessaging.Manager, mm metrics.Manager, om operations.Manager, cm contracts.Manager, txHelper txcommon.Helper) (Manager, error) { +func NewAssetManager(ctx context.Context, ns, keyNormalization string, di database.Plugin, ti 
map[string]tokens.Plugin, im identity.Manager, sa syncasync.Bridge, bm broadcast.Manager, pm privatemessaging.Manager, mm metrics.Manager, om operations.Manager, cm contracts.Manager, txHelper txcommon.Helper, cacheManager cache.Manager) (Manager, error) { if di == nil || im == nil || sa == nil || ti == nil || mm == nil || om == nil { return nil, i18n.NewError(ctx, coremsgs.MsgInitializationNilDepError, "AssetManager") } + var err error am := &assetManager{ ctx: ctx, namespace: ns, @@ -104,6 +110,19 @@ func NewAssetManager(ctx context.Context, ns, keyNormalization string, di databa operations: om, contracts: cm, } + if cacheManager != nil { + am.cache, err = cacheManager.GetCache( + cache.NewCacheConfig( + ctx, + coreconfig.CacheTokenPoolLimit, + coreconfig.CacheTokenPoolTTL, + "", + ), + ) + if err != nil { + return nil, err + } + } om.RegisterHandler(ctx, am, []core.OpType{ core.OpTypeTokenCreatePool, core.OpTypeTokenActivatePool, diff --git a/internal/assets/manager_test.go b/internal/assets/manager_test.go index 19252e56f3..b71d7b734b 100644 --- a/internal/assets/manager_test.go +++ b/internal/assets/manager_test.go @@ -17,6 +17,7 @@ package assets import ( "context" + "errors" "testing" "time" @@ -71,7 +72,7 @@ func newTestAssetsCommon(t *testing.T, metrics bool) (*assetManager, func()) { mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) mti.On("Name").Return("ut").Maybe() ctx, cancel := context.WithCancel(ctx) - a, err := NewAssetManager(ctx, "ns1", "blockchain_plugin", mdi, map[string]tokens.Plugin{"magic-tokens": mti}, mim, msa, mbm, mpm, mm, mom, mcm, txHelper) + a, err := NewAssetManager(ctx, "ns1", "blockchain_plugin", mdi, map[string]tokens.Plugin{"magic-tokens": mti}, mim, msa, mbm, mpm, mm, mom, mcm, txHelper, cmi) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() rag.RunFn = func(a mock.Arguments) { rag.ReturnArguments = mock.Arguments{a[1].(func(context.Context) error)(a[0].(context.Context))} @@ -83,10 +84,32 @@ func newTestAssetsCommon(t *testing.T, metrics bool) (*assetManager, func()) { } func TestInitFail(t *testing.T) { - _, err := NewAssetManager(context.Background(), "", "", nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + _, err := NewAssetManager(context.Background(), "", "", nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", err) } +func TestCacheInitFail(t *testing.T) { + cacheInitError := errors.New("Initialization error.") + coreconfig.Reset() + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + mim := &identitymanagermocks.Manager{} + msa := &syncasyncmocks.Bridge{} + mbm := &broadcastmocks.Manager{} + mpm := &privatemessagingmocks.Manager{} + mti := &tokenmocks.Plugin{} + mm := &metricsmocks.Manager{} + mom := &operationmocks.Manager{} + mcm := &contractmocks.Manager{} + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(nil, cacheInitError) + txHelper, _ := txcommon.NewTransactionHelper(context.Background(), "ns1", mdi, mdm, cmi) + + _, err := NewAssetManager(context.Background(), "ns1", "blockchain_plugin", mdi, map[string]tokens.Plugin{"magic-tokens": mti}, mim, msa, mbm, mpm, mm, mom, mcm, txHelper, cmi) + + assert.Equal(t, cacheInitError, err) +} + func TestName(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() diff --git a/internal/assets/operations.go b/internal/assets/operations.go index 5d7fb415ef..f163491808 100644 --- a/internal/assets/operations.go +++ b/internal/assets/operations.go @@ -24,6 +24,7 @@ import ( 
"github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly-common/pkg/log" "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/core" ) @@ -60,7 +61,7 @@ func (am *assetManager) PrepareOperation(ctx context.Context, op *core.Operation if err != nil { return nil, err } - pool, err := am.database.GetTokenPoolByID(ctx, am.namespace, poolID) + pool, err := am.GetTokenPoolByID(ctx, poolID) if err != nil { return nil, err } else if pool == nil { @@ -73,7 +74,7 @@ func (am *assetManager) PrepareOperation(ctx context.Context, op *core.Operation if err != nil { return nil, err } - pool, err := am.database.GetTokenPoolByID(ctx, am.namespace, transfer.Pool) + pool, err := am.GetTokenPoolByID(ctx, transfer.Pool) if err != nil { return nil, err } else if pool == nil { @@ -86,7 +87,7 @@ func (am *assetManager) PrepareOperation(ctx context.Context, op *core.Operation if err != nil { return nil, err } - pool, err := am.database.GetTokenPoolByID(ctx, am.namespace, approval.Pool) + pool, err := am.GetTokenPoolByID(ctx, approval.Pool) if err != nil { return nil, err } else if pool == nil { @@ -99,49 +100,50 @@ func (am *assetManager) PrepareOperation(ctx context.Context, op *core.Operation } } -func (am *assetManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { +func (am *assetManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { switch data := op.Data.(type) { case createPoolData: plugin, err := am.selectTokenPlugin(ctx, data.Pool.Connector) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } - complete, err = plugin.CreateTokenPool(ctx, op.NamespacedIDString(), data.Pool) - return nil, complete, err + phase, err = plugin.CreateTokenPool(ctx, op.NamespacedIDString(), data.Pool) + return nil, phase, err case activatePoolData: plugin, err := am.selectTokenPlugin(ctx, data.Pool.Connector) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } - complete, err = plugin.ActivateTokenPool(ctx, op.NamespacedIDString(), data.Pool) - return nil, complete, err + phase, err = plugin.ActivateTokenPool(ctx, data.Pool) + return nil, phase, err case transferData: plugin, err := am.selectTokenPlugin(ctx, data.Pool.Connector) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } switch data.Transfer.Type { case core.TokenTransferTypeMint: - return nil, false, plugin.MintTokens(ctx, op.NamespacedIDString(), data.Pool.Locator, data.Transfer, data.Pool.Methods) + err = plugin.MintTokens(ctx, op.NamespacedIDString(), data.Pool.Locator, data.Transfer, data.Pool.Methods) case core.TokenTransferTypeTransfer: - return nil, false, plugin.TransferTokens(ctx, op.NamespacedIDString(), data.Pool.Locator, data.Transfer, data.Pool.Methods) + err = plugin.TransferTokens(ctx, op.NamespacedIDString(), data.Pool.Locator, data.Transfer, data.Pool.Methods) case core.TokenTransferTypeBurn: - return nil, false, plugin.BurnTokens(ctx, op.NamespacedIDString(), data.Pool.Locator, data.Transfer, data.Pool.Methods) + err = plugin.BurnTokens(ctx, op.NamespacedIDString(), data.Pool.Locator, data.Transfer, data.Pool.Methods) default: panic(fmt.Sprintf("unknown transfer type: %v", data.Transfer.Type)) } + return nil, operations.ErrTernary(err, 
core.OpPhaseInitializing, core.OpPhasePending), err case approvalData: plugin, err := am.selectTokenPlugin(ctx, data.Pool.Connector) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } - return nil, false, plugin.TokensApproval(ctx, op.NamespacedIDString(), data.Pool.Locator, data.Approval, data.Pool.Methods) + return nil, core.OpPhaseInitializing, plugin.TokensApproval(ctx, op.NamespacedIDString(), data.Pool.Locator, data.Approval, data.Pool.Methods) default: - return nil, false, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) + return nil, core.OpPhaseInitializing, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) } } diff --git a/internal/assets/operations_test.go b/internal/assets/operations_test.go index c9f89d4a23..f4c79bb4e2 100644 --- a/internal/assets/operations_test.go +++ b/internal/assets/operations_test.go @@ -46,15 +46,15 @@ func TestPrepareAndRunCreatePool(t *testing.T) { assert.NoError(t, err) mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) - mti.On("CreateTokenPool", context.Background(), "ns1:"+op.ID.String(), pool).Return(false, nil) + mti.On("CreateTokenPool", context.Background(), "ns1:"+op.ID.String(), pool).Return(core.OpPhaseComplete, nil) po, err := am.PrepareOperation(context.Background(), op) assert.NoError(t, err) assert.Equal(t, pool, po.Data.(createPoolData).Pool) - _, complete, err := am.RunOperation(context.Background(), po) + _, phase, err := am.RunOperation(context.Background(), po) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseComplete, phase) assert.NoError(t, err) mti.AssertExpectations(t) @@ -78,16 +78,16 @@ func TestPrepareAndRunActivatePool(t *testing.T) { mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mdi := am.database.(*databasemocks.Plugin) - mti.On("ActivateTokenPool", context.Background(), "ns1:"+op.ID.String(), pool).Return(true, nil) + mti.On("ActivateTokenPool", context.Background(), pool).Return(core.OpPhaseComplete, nil) mdi.On("GetTokenPoolByID", context.Background(), "ns1", pool.ID).Return(pool, nil) po, err := am.PrepareOperation(context.Background(), op) assert.NoError(t, err) assert.Equal(t, pool, po.Data.(activatePoolData).Pool) - _, complete, err := am.RunOperation(context.Background(), po) + _, phase, err := am.RunOperation(context.Background(), po) - assert.True(t, complete) + assert.Equal(t, core.OpPhaseComplete, phase) assert.NoError(t, err) mti.AssertExpectations(t) @@ -124,9 +124,9 @@ func TestPrepareAndRunTransfer(t *testing.T) { assert.Equal(t, pool, po.Data.(transferData).Pool) assert.Equal(t, transfer, po.Data.(transferData).Transfer) - _, complete, err := am.RunOperation(context.Background(), po) + _, phase, err := am.RunOperation(context.Background(), po) - assert.False(t, complete) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) mti.AssertExpectations(t) @@ -163,9 +163,9 @@ func TestPrepareAndRunApproval(t *testing.T) { assert.Equal(t, pool, po.Data.(approvalData).Pool) assert.Equal(t, approval, po.Data.(approvalData).Approval) - _, complete, err := am.RunOperation(context.Background(), po) + _, phase, err := am.RunOperation(context.Background(), po) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.NoError(t, err) mti.AssertExpectations(t) @@ -352,9 +352,9 @@ func TestRunOperationNotSupported(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() - _, complete, err := am.RunOperation(context.Background(), &core.PreparedOperation{}) + _, phase, err := 
am.RunOperation(context.Background(), &core.PreparedOperation{}) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10378", err) } @@ -365,9 +365,9 @@ func TestRunOperationCreatePoolBadPlugin(t *testing.T) { op := &core.Operation{} pool := &core.TokenPool{} - _, complete, err := am.RunOperation(context.Background(), opCreatePool(op, pool)) + _, phase, err := am.RunOperation(context.Background(), opCreatePool(op, pool)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10272", err) } @@ -384,11 +384,11 @@ func TestRunOperationCreatePool(t *testing.T) { } mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) - mti.On("CreateTokenPool", context.Background(), "ns1:"+op.ID.String(), pool).Return(false, nil) + mti.On("CreateTokenPool", context.Background(), "ns1:"+op.ID.String(), pool).Return(core.OpPhaseInitializing, nil) - _, complete, err := am.RunOperation(context.Background(), opCreatePool(op, pool)) + _, phase, err := am.RunOperation(context.Background(), opCreatePool(op, pool)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.NoError(t, err) mti.AssertExpectations(t) @@ -401,9 +401,9 @@ func TestRunOperationActivatePoolBadPlugin(t *testing.T) { op := &core.Operation{} pool := &core.TokenPool{} - _, complete, err := am.RunOperation(context.Background(), opActivatePool(op, pool)) + _, phase, err := am.RunOperation(context.Background(), opActivatePool(op, pool)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10272", err) } @@ -415,9 +415,9 @@ func TestRunOperationTransferBadPlugin(t *testing.T) { pool := &core.TokenPool{} transfer := &core.TokenTransfer{} - _, complete, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + _, phase, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10272", err) } @@ -429,9 +429,9 @@ func TestRunOperationApprovalBadPlugin(t *testing.T) { pool := &core.TokenPool{} approval := &core.TokenApproval{} - _, complete, err := am.RunOperation(context.Background(), opApproval(op, pool, approval)) + _, phase, err := am.RunOperation(context.Background(), opApproval(op, pool, approval)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10272", err) } @@ -473,9 +473,9 @@ func TestRunOperationTransferMint(t *testing.T) { mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mti.On("MintTokens", context.Background(), "ns1:"+op.ID.String(), "F1", transfer, (*fftypes.JSONAny)(nil)).Return(nil) - _, complete, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + _, phase, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) - assert.False(t, complete) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) mti.AssertExpectations(t) @@ -501,9 +501,9 @@ func TestRunOperationTransferMintWithInterface(t *testing.T) { mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mti.On("MintTokens", context.Background(), "ns1:"+op.ID.String(), "F1", transfer, pool.Methods).Return(nil) - _, complete, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + _, phase, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) - assert.False(t, complete) + assert.Equal(t, core.OpPhasePending, phase) 
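	// NOTE (editorial sketch, not part of this change set): the assertions above follow the
	// phase-based RunOperation contract introduced in this diff. Assuming operations.ErrTernary(err, a, b)
	// yields a when err != nil and b otherwise, the submit outcome maps to a phase roughly like this:
	phaseForSubmit := func(err error) core.OpPhase {
		if err != nil {
			// the connector call failed before submission, so the operation is still initializing
			return core.OpPhaseInitializing
		}
		// the connector accepted the request asynchronously, so the operation is pending confirmation
		return core.OpPhasePending
	}
	_ = phaseForSubmit
	// This is why a mint/transfer/burn that submits cleanly is asserted as OpPhasePending, while any
	// pre-submission failure (bad plugin, bad data) is asserted as OpPhaseInitializing.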
assert.NoError(t, err) mti.AssertExpectations(t) @@ -528,9 +528,9 @@ func TestRunOperationTransferBurn(t *testing.T) { mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mti.On("BurnTokens", context.Background(), "ns1:"+op.ID.String(), "F1", transfer, (*fftypes.JSONAny)(nil)).Return(nil) - _, complete, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + _, phase, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) - assert.False(t, complete) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) mti.AssertExpectations(t) @@ -555,9 +555,9 @@ func TestRunOperationTransfer(t *testing.T) { mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mti.On("TransferTokens", context.Background(), "ns1:"+op.ID.String(), "F1", transfer, (*fftypes.JSONAny)(nil)).Return(nil) - _, complete, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + _, phase, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) - assert.False(t, complete) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) mti.AssertExpectations(t) diff --git a/internal/assets/token_approval.go b/internal/assets/token_approval.go index e7c4738155..406af13e61 100644 --- a/internal/assets/token_approval.go +++ b/internal/assets/token_approval.go @@ -34,10 +34,11 @@ func (am *assetManager) GetTokenApprovals(ctx context.Context, filter ffapi.AndF } type approveSender struct { - mgr *assetManager - approval *core.TokenApprovalInput - resolved bool - msgSender syncasync.Sender + mgr *assetManager + approval *core.TokenApprovalInput + resolved bool + msgSender syncasync.Sender + idempotentSubmit bool } func (s *approveSender) Prepare(ctx context.Context) error { @@ -58,8 +59,9 @@ func (s *approveSender) setDefaults() { func (am *assetManager) NewApproval(approval *core.TokenApprovalInput) syncasync.Sender { sender := &approveSender{ - mgr: am, - approval: approval, + mgr: am, + approval: approval, + idempotentSubmit: approval.IdempotencyKey != "", } sender.setDefaults() return sender @@ -107,17 +109,28 @@ func (s *approveSender) resolve(ctx context.Context) (opResubmitted bool, err er if err != nil { // Check if we've clashed on idempotency key. 
There might be operations still in "Initialized" state that need // submitting to their handlers + resubmitWholeTX := false if idemErr, ok := err.(*sqlcommon.IdempotencyError); ok { - operation, resubmitErr := s.mgr.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) + total, resubmitted, resubmitErr := s.mgr.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) if resubmitErr != nil { // Error doing resubmit, return the new error - err = resubmitErr - } else if operation != nil { - // We successfully resubmitted an initialized operation, return 2xx not 409 + return false, resubmitErr + } + if total == 0 { + // We didn't do anything last time - just start again + txid = idemErr.ExistingTXID + resubmitWholeTX = true + err = nil + } else if len(resubmitted) > 0 { + // We resubmitted something - translate the status code to 200 (true return) + s.approval.TX.ID = idemErr.ExistingTXID + s.approval.TX.Type = core.TransactionTypeTokenApproval return true, nil } } - return false, err + if !resubmitWholeTX { + return false, err + } } s.approval.TX.ID = txid s.approval.TX.Type = core.TransactionTypeTokenApproval @@ -191,7 +204,7 @@ func (s *approveSender) sendInternal(ctx context.Context, method sendMethod) (er } } - _, err = s.mgr.operations.RunOperation(ctx, opApproval(op, pool, &s.approval.TokenApproval)) + _, err = s.mgr.operations.RunOperation(ctx, opApproval(op, pool, &s.approval.TokenApproval), s.idempotentSubmit) return err } @@ -210,8 +223,8 @@ func (am *assetManager) validateApproval(ctx context.Context, approval *core.Tok approval.TokenApproval.Pool = pool.ID approval.TokenApproval.Connector = pool.Connector - if pool.State != core.TokenPoolStateConfirmed { - return nil, i18n.NewError(ctx, coremsgs.MsgTokenPoolNotConfirmed) + if !pool.Active { + return nil, i18n.NewError(ctx, coremsgs.MsgTokenPoolNotActive) } approval.Key, err = am.identity.ResolveInputSigningKey(ctx, approval.Key, am.keyNormalization) return pool, err diff --git a/internal/assets/token_approval_test.go b/internal/assets/token_approval_test.go index eb1bc7c5f7..b56ec732a3 100644 --- a/internal/assets/token_approval_test.go +++ b/internal/assets/token_approval_test.go @@ -67,7 +67,7 @@ func TestTokenApprovalSuccess(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -81,7 +81,7 @@ func TestTokenApprovalSuccess(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(approvalData) return op.Type == core.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TokenApproval(context.Background(), approval, false) assert.NoError(t, err) @@ -107,7 +107,7 @@ func TestTokenApprovalSuccessUnknownIdentity(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -121,7 +121,7 @@ func TestTokenApprovalSuccessUnknownIdentity(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(approvalData) return op.Type == core.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TokenApproval(context.Background(), approval, false) 
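	// NOTE (editorial sketch, not part of this change set; the helper name is hypothetical): the
	// idempotency handling exercised by the resubmit tests in this file now has three outcomes,
	// driven by the (total, resubmitted) pair returned from ResubmitOperations after an IdempotencyError:
	describeIdempotentRetry := func(total int, resubmitted []*core.Operation) string {
		switch {
		case total == 0:
			// nothing was recorded against the existing transaction last time - restart the whole TX
			return "restart whole transaction"
		case len(resubmitted) > 0:
			// at least one "Initialized" operation was resubmitted - report success (2xx) rather than 409
			return "report success"
		default:
			// operations exist but none needed resubmission - surface the original 409 conflict
			return "return 409 conflict"
		}
	}
	_ = describeIdempotentRetry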
assert.NoError(t, err) @@ -148,7 +148,7 @@ func TestApprovalBadConnector(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "bad", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -191,7 +191,7 @@ func TestApprovalDefaultPoolSuccess(t *testing.T) { Name: "pool1", Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, }, } totalCount := int64(1) @@ -208,7 +208,7 @@ func TestApprovalDefaultPoolSuccess(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(approvalData) return op.Type == core.OpTypeTokenApproval && data.Pool == tokenPools[0] && data.Approval == &approval.TokenApproval - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TokenApproval(context.Background(), approval, false) assert.NoError(t, err) @@ -243,7 +243,68 @@ func TestApprovalIdempotentOperationResubmit(t *testing.T) { mth.On("SubmitNewTransaction", context.Background(), core.TransactionTypeTokenApproval, core.IdempotencyKey("idem1")).Return(id, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(op, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1, []*core.Operation{op}, nil) + + // If ResubmitOperations returns an operation it's because it found one to resubmit, so we return 2xx not 409, and don't expect an error + _, err := am.TokenApproval(context.Background(), approval, false) + assert.NoError(t, err) + + mth.AssertExpectations(t) + mom.AssertExpectations(t) +} + +func TestApprovalIdempotentOperationResubmitAll(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + var id = fftypes.NewUUID() + + approval := &core.TokenApprovalInput{ + TokenApproval: core.TokenApproval{ + Approved: true, + Operator: "operator", + Key: "key", + }, + IdempotencyKey: "idem1", + } + + pool := &core.TokenPool{ + Connector: "magic-tokens", + Active: true, + } + + mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) + mdi := am.database.(*databasemocks.Plugin) + mim := am.identity.(*identitymanagermocks.Manager) + + fb := database.TokenPoolQueryFactory.NewFilter(context.Background()) + f := fb.And() + f.Limit(1).Count(true) + mth.On("SubmitNewTransaction", context.Background(), core.TransactionTypeTokenApproval, core.IdempotencyKey("idem1")).Return(id, &sqlcommon.IdempotencyError{ + ExistingTXID: id, + OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) + mom.On("ResubmitOperations", context.Background(), id).Return(0, nil, nil) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + mom.On("AddOrReuseOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.Anything, true).Return(nil, nil) + + tokenPools := []*core.TokenPool{ + { + Name: "pool1", + Locator: "F1", + Connector: "magic-tokens", + Active: true, + }, + } + totalCount := int64(1) + filterResult := &ffapi.FilterResult{ + TotalCount: &totalCount, + } + mim.On("ResolveInputSigningKey", context.Background(), "key", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) + mdi.On("GetTokenPools", context.Background(), "ns1", mock.MatchedBy((func(f ffapi.AndFilter) bool { + info, _ := 
f.Finalize() + return info.Count && info.Limit == 1 + }))).Return(tokenPools, filterResult, nil) // If ResubmitOperations returns an operation it's because it found one to resubmit, so we return 2xx not 409, and don't expect an error _, err := am.TokenApproval(context.Background(), approval, false) @@ -275,7 +336,7 @@ func TestApprovalIdempotentNoOperationToResubmit(t *testing.T) { mth.On("SubmitNewTransaction", context.Background(), core.TransactionTypeTokenApproval, core.IdempotencyKey("idem1")).Return(id, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1 /* one total */, nil /* none to resubmit */, nil) // If ResubmitOperations returns nil it's because there was no operation in initialized state, so we expect the regular 409 error back _, err := am.TokenApproval(context.Background(), approval, false) @@ -308,7 +369,7 @@ func TestApprovalIdempotentOperationErrorOnResubmit(t *testing.T) { mth.On("SubmitNewTransaction", context.Background(), core.TransactionTypeTokenApproval, core.IdempotencyKey("idem1")).Return(id, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, fmt.Errorf("pop")) + mom.On("ResubmitOperations", context.Background(), id).Return(-1, nil, fmt.Errorf("pop")) // If ResubmitOperations returns an operation it's because it found one to resubmit, so we return 2xx not 409, and don't expect an error _, err := am.TokenApproval(context.Background(), approval, false) @@ -396,7 +457,7 @@ func TestApprovalUnconfirmedPool(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStatePending, + Active: false, } mdi := am.database.(*databasemocks.Plugin) @@ -426,7 +487,7 @@ func TestApprovalIdentityFail(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -460,7 +521,7 @@ func TestApprovalFail(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -474,7 +535,7 @@ func TestApprovalFail(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(approvalData) return op.Type == core.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval - })).Return(nil, fmt.Errorf("pop")) + }), true).Return(nil, fmt.Errorf("pop")) _, err := am.TokenApproval(context.Background(), approval, false) assert.EqualError(t, err, "pop") @@ -536,7 +597,7 @@ func TestApprovalWithBroadcastMessage(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -555,7 +616,7 @@ func TestApprovalWithBroadcastMessage(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(approvalData) return op.Type == core.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval - })).Return(nil, 
nil) + }), true).Return(nil, nil) _, err := am.TokenApproval(context.Background(), approval, false) assert.NoError(t, err) @@ -637,7 +698,7 @@ func TestApprovalWithBroadcastMessageSendFail(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -732,7 +793,7 @@ func TestApprovalWithPrivateMessage(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -751,7 +812,7 @@ func TestApprovalWithPrivateMessage(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(approvalData) return op.Type == core.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TokenApproval(context.Background(), approval, false) assert.NoError(t, err) @@ -855,7 +916,7 @@ func TestApprovalOperationsFail(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -893,7 +954,7 @@ func TestTokenApprovalConfirm(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -908,7 +969,7 @@ func TestTokenApprovalConfirm(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(approvalData) return op.Type == core.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval - })).Return(nil, nil) + }), true).Return(nil, nil) msa.On("WaitForTokenApproval", context.Background(), mock.Anything, mock.Anything). 
Run(func(args mock.Arguments) { @@ -957,7 +1018,7 @@ func TestApprovalWithBroadcastConfirm(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -989,7 +1050,7 @@ func TestApprovalWithBroadcastConfirm(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(approvalData) return op.Type == core.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TokenApproval(context.Background(), approval, true) assert.NoError(t, err) @@ -1020,7 +1081,7 @@ func TestApprovalPrepare(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } sender := am.NewApproval(approval) diff --git a/internal/assets/token_pool.go b/internal/assets/token_pool.go index 7f94bd99bf..36c88d4fd5 100644 --- a/internal/assets/token_pool.go +++ b/internal/assets/token_pool.go @@ -18,6 +18,7 @@ package assets import ( "context" + "fmt" "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" @@ -27,6 +28,7 @@ import ( "github.com/hyperledger/firefly/internal/database/sqlcommon" "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/core" + "github.com/hyperledger/firefly/pkg/database" ) func (am *assetManager) CreateTokenPool(ctx context.Context, pool *core.TokenPoolInput, waitConfirm bool) (*core.TokenPool, error) { @@ -77,22 +79,35 @@ func (am *assetManager) createTokenPoolInternal(ctx context.Context, pool *core. } var newOperation *core.Operation - var resubmittedOperation *core.Operation + var resubmitted []*core.Operation + var resubmitErr error err = am.database.RunAsGroup(ctx, func(ctx context.Context) (err error) { txid, err := am.txHelper.SubmitNewTransaction(ctx, core.TransactionTypeTokenPool, pool.IdempotencyKey) if err != nil { - var resubmitErr error - // Check if we've clashed on idempotency key. There might be operations still in "Initialized" state that need // submitting to their handlers. + resubmitWholeTX := false if idemErr, ok := err.(*sqlcommon.IdempotencyError); ok { - resubmittedOperation, resubmitErr = am.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) + var total int + total, resubmitted, resubmitErr = am.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) if resubmitErr != nil { // Error doing resubmit, return the new error return resubmitErr } + if total == 0 { + // We didn't do anything last time - just start again + txid = idemErr.ExistingTXID + resubmitWholeTX = true + err = nil + } else if len(resubmitted) > 0 { + pool.TX.ID = idemErr.ExistingTXID + pool.TX.Type = core.TransactionTypeTokenPool + err = nil + } + } + if !resubmitWholeTX { + return err } - return err } pool.TX.ID = txid @@ -108,7 +123,7 @@ func (am *assetManager) createTokenPoolInternal(ctx context.Context, pool *core. } return err }) - if resubmittedOperation != nil { + if len(resubmitted) > 0 { // We resubmitted a previously initialized operation, don't run a new one return &pool.TokenPool, nil } @@ -117,7 +132,7 @@ func (am *assetManager) createTokenPoolInternal(ctx context.Context, pool *core. 
return nil, err } - _, err = am.operations.RunOperation(ctx, opCreatePool(newOperation, &pool.TokenPool)) + _, err = am.operations.RunOperation(ctx, opCreatePool(newOperation, &pool.TokenPool), pool.IdempotencyKey != "") return &pool.TokenPool, err } @@ -148,7 +163,10 @@ func (am *assetManager) ActivateTokenPool(ctx context.Context, pool *core.TokenP return err } - _, err = am.operations.RunOperation(ctx, opActivatePool(op, pool)) + _, err = am.operations.RunOperation(ctx, opActivatePool(op, pool), + false, // TODO: this operation should be made idempotent, but cannot inherit this from the TX per our normal semantics + // as the transaction is only on the submitting side and this is triggered on all parties. + ) return err } @@ -156,25 +174,39 @@ func (am *assetManager) GetTokenPools(ctx context.Context, filter ffapi.AndFilte return am.database.GetTokenPools(ctx, am.namespace, filter) } -func (am *assetManager) GetTokenPool(ctx context.Context, connector, poolName string) (*core.TokenPool, error) { - if _, err := am.selectTokenPlugin(ctx, connector); err != nil { - return nil, err +func (am *assetManager) GetTokenPoolByLocator(ctx context.Context, connector, poolLocator string) (*core.TokenPool, error) { + cacheKey := fmt.Sprintf("ns=%s,connector=%s,poollocator=%s", am.namespace, connector, poolLocator) + if cachedValue := am.cache.Get(cacheKey); cachedValue != nil { + log.L(ctx).Debugf("Token pool cache hit: %s", cacheKey) + return cachedValue.(*core.TokenPool), nil } - if err := fftypes.ValidateFFNameFieldNoUUID(ctx, poolName, "name"); err != nil { + log.L(ctx).Debugf("Token pool cache miss: %s", cacheKey) + if _, err := am.selectTokenPlugin(ctx, connector); err != nil { return nil, err } - pool, err := am.database.GetTokenPool(ctx, am.namespace, poolName) - if err != nil { + + fb := database.TokenPoolQueryFactory.NewFilter(ctx) + results, _, err := am.database.GetTokenPools(ctx, am.namespace, fb.And( + fb.Eq("connector", connector), + fb.Eq("locator", poolLocator), + )) + if err != nil || len(results) == 0 { return nil, err } - if pool == nil { - return nil, i18n.NewError(ctx, coremsgs.Msg404NotFound) - } - return pool, nil + + // Cache the result + am.cache.Set(cacheKey, results[0]) + return results[0], nil } func (am *assetManager) GetTokenPoolByNameOrID(ctx context.Context, poolNameOrID string) (*core.TokenPool, error) { var pool *core.TokenPool + cacheKey := fmt.Sprintf("ns=%s,poolnameorid=%s", am.namespace, poolNameOrID) + if cachedValue := am.cache.Get(cacheKey); cachedValue != nil { + log.L(ctx).Debugf("Token pool cache hit: %s", cacheKey) + return cachedValue.(*core.TokenPool), nil + } + log.L(ctx).Debugf("Token pool cache miss: %s", cacheKey) poolID, err := fftypes.ParseUUID(ctx, poolNameOrID) if err != nil { @@ -184,15 +216,30 @@ func (am *assetManager) GetTokenPoolByNameOrID(ctx context.Context, poolNameOrID if pool, err = am.database.GetTokenPool(ctx, am.namespace, poolNameOrID); err != nil { return nil, err } - } else if pool, err = am.database.GetTokenPoolByID(ctx, am.namespace, poolID); err != nil { + } else if pool, err = am.GetTokenPoolByID(ctx, poolID); err != nil { return nil, err } if pool == nil { return nil, i18n.NewError(ctx, coremsgs.Msg404NotFound) } + // Cache the result + am.cache.Set(cacheKey, pool) return pool, nil } +func (am *assetManager) removeTokenPoolFromCache(ctx context.Context, pool *core.TokenPool) { + cacheKeyName := fmt.Sprintf("ns=%s,poolnameorid=%s", am.namespace, pool.Name) + cacheKeyID := fmt.Sprintf("ns=%s,poolnameorid=%s", am.namespace, 
pool.ID) + cacheKeyLocator := fmt.Sprintf("ns=%s,connector=%s,poollocator=%s", am.namespace, pool.Connector, pool.Locator) + am.cache.Delete(cacheKeyName) + am.cache.Delete(cacheKeyID) + am.cache.Delete(cacheKeyLocator) +} + +func (am *assetManager) GetTokenPoolByID(ctx context.Context, poolID *fftypes.UUID) (*core.TokenPool, error) { + return am.database.GetTokenPoolByID(ctx, am.namespace, poolID) +} + func (am *assetManager) ResolvePoolMethods(ctx context.Context, pool *core.TokenPool) error { plugin, err := am.selectTokenPlugin(ctx, pool.Connector) if err == nil && pool.Interface != nil && pool.Interface.ID != nil && am.contracts != nil { @@ -204,3 +251,33 @@ func (am *assetManager) ResolvePoolMethods(ctx context.Context, pool *core.Token } return err } + +func (am *assetManager) DeleteTokenPool(ctx context.Context, poolNameOrID string) error { + return am.database.RunAsGroup(ctx, func(ctx context.Context) error { + pool, err := am.GetTokenPoolByNameOrID(ctx, poolNameOrID) + if err != nil { + return err + } + if pool.Published { + return i18n.NewError(ctx, coremsgs.MsgCannotDeletePublished) + } + plugin, err := am.selectTokenPlugin(ctx, pool.Connector) + if err != nil { + return err + } + am.removeTokenPoolFromCache(ctx, pool) + if err = am.database.DeleteTokenPool(ctx, am.namespace, pool.ID); err != nil { + return err + } + if err = am.database.DeleteTokenTransfers(ctx, am.namespace, pool.ID); err != nil { + return err + } + if err = am.database.DeleteTokenApprovals(ctx, am.namespace, pool.ID); err != nil { + return err + } + if err = am.database.DeleteTokenBalances(ctx, am.namespace, pool.ID); err != nil { + return err + } + return plugin.DeactivateTokenPool(ctx, pool) + }) +} diff --git a/internal/assets/token_pool_test.go b/internal/assets/token_pool_test.go index b04a1fa6a4..92025f5cae 100644 --- a/internal/assets/token_pool_test.go +++ b/internal/assets/token_pool_test.go @@ -19,6 +19,7 @@ import ( "fmt" "testing" + "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly/internal/coremsgs" @@ -109,7 +110,7 @@ func TestCreateTokenPoolDefaultConnectorSuccess(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(createPoolData) return op.Type == core.OpTypeTokenCreatePool && data.Pool == &pool.TokenPool - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.CreateTokenPool(context.Background(), pool, false) assert.NoError(t, err) @@ -149,7 +150,45 @@ func TestCreateTokenPoolIdempotentResubmit(t *testing.T) { Return(id, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(op, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1, []*core.Operation{op}, nil) + + _, err := am.CreateTokenPool(context.Background(), pool, false) + + // SubmitNewTransction returned 409 idempotency clash, ResubmitOperations returned that it resubmitted an operation. 
Shouldn't + // see the original 409 Conflict error + assert.NoError(t, err) + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) +} + +func TestCreateTokenPoolIdempotentResubmitAll(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + var id = fftypes.NewUUID() + + pool := &core.TokenPoolInput{ + TokenPool: core.TokenPool{ + Name: "testpool", + }, + IdempotencyKey: "idem1", + } + + mdi := am.database.(*databasemocks.Plugin) + mim := am.identity.(*identitymanagermocks.Manager) + mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) + mdi.On("GetTokenPool", context.Background(), "ns1", "testpool").Return(nil, nil) + mim.On("ResolveInputSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("resolved-key", nil) + mth.On("SubmitNewTransaction", context.Background(), core.TransactionTypeTokenPool, core.IdempotencyKey("idem1")). + Return(id, &sqlcommon.IdempotencyError{ + ExistingTXID: id, + OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) + mom.On("ResubmitOperations", context.Background(), id).Return(0, nil, nil) + mom.On("AddOrReuseOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.Anything, true).Return(nil, nil) _, err := am.CreateTokenPool(context.Background(), pool, false) @@ -185,7 +224,7 @@ func TestCreateTokenPoolIdempotentNoOperationToResubmit(t *testing.T) { Return(id, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1 /* total */, nil /* to resubmit */, nil) _, err := am.CreateTokenPool(context.Background(), pool, false) @@ -222,7 +261,7 @@ func TestCreateTokenPoolIdempotentErrorOnOperationResubmit(t *testing.T) { Return(id, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, fmt.Errorf("pop")) + mom.On("ResubmitOperations", context.Background(), id).Return(-1, nil, fmt.Errorf("pop")) _, err := am.CreateTokenPool(context.Background(), pool, false) @@ -368,7 +407,7 @@ func TestCreateTokenPoolFail(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(createPoolData) return op.Type == core.OpTypeTokenCreatePool && data.Pool == &pool.TokenPool - })).Return(nil, fmt.Errorf("pop")) + }), true).Return(nil, fmt.Errorf("pop")) _, err := am.CreateTokenPool(context.Background(), pool, false) assert.Regexp(t, "pop", err) @@ -486,7 +525,7 @@ func TestCreateTokenPoolSyncSuccess(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(createPoolData) return op.Type == core.OpTypeTokenCreatePool && data.Pool == &pool.TokenPool - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.CreateTokenPool(context.Background(), pool, false) assert.NoError(t, err) @@ -520,7 +559,7 @@ func TestCreateTokenPoolAsyncSuccess(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := 
op.Data.(createPoolData) return op.Type == core.OpTypeTokenCreatePool && data.Pool == &pool.TokenPool - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.CreateTokenPool(context.Background(), pool, false) assert.NoError(t, err) @@ -561,7 +600,7 @@ func TestCreateTokenPoolConfirm(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(createPoolData) return op.Type == core.OpTypeTokenCreatePool && data.Pool == &pool.TokenPool - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.CreateTokenPool(context.Background(), pool, true) assert.NoError(t, err) @@ -595,7 +634,7 @@ func TestActivateTokenPool(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(activatePoolData) return op.Type == core.OpTypeTokenActivatePool && data.Pool == pool - })).Return(nil, nil) + }), false).Return(nil, nil) err := am.ActivateTokenPool(context.Background(), pool) assert.NoError(t, err) @@ -664,7 +703,7 @@ func TestActivateTokenPoolFail(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(activatePoolData) return op.Type == core.OpTypeTokenActivatePool && data.Pool == pool - })).Return(nil, fmt.Errorf("pop")) + }), false).Return(nil, fmt.Errorf("pop")) err := am.ActivateTokenPool(context.Background(), pool) assert.EqualError(t, err, "pop") @@ -736,7 +775,7 @@ func TestActivateTokenPoolSyncSuccess(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(activatePoolData) return op.Type == core.OpTypeTokenActivatePool && data.Pool == pool - })).Return(nil, nil) + }), false).Return(nil, nil) err := am.ActivateTokenPool(context.Background(), pool) assert.NoError(t, err) @@ -745,56 +784,77 @@ func TestActivateTokenPoolSyncSuccess(t *testing.T) { mom.AssertExpectations(t) } -func TestGetTokenPool(t *testing.T) { +func TestGetTokenPoolByLocator(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() mdi := am.database.(*databasemocks.Plugin) - mdi.On("GetTokenPool", context.Background(), "ns1", "abc").Return(&core.TokenPool{}, nil) - _, err := am.GetTokenPool(context.Background(), "magic-tokens", "abc") + mdi.On("GetTokenPools", context.Background(), "ns1", mock.MatchedBy(func(filter ffapi.AndFilter) bool { + f, err := filter.Finalize() + assert.NoError(t, err) + assert.Len(t, f.Children, 2) + assert.Equal(t, "connector", f.Children[0].Field) + val, _ := f.Children[0].Value.Value() + assert.Equal(t, "magic-tokens", val) + assert.Equal(t, "locator", f.Children[1].Field) + val, _ = f.Children[1].Value.Value() + assert.Equal(t, "abc", val) + return true + })).Return([]*core.TokenPool{{}}, nil, nil) + result, err := am.GetTokenPoolByLocator(context.Background(), "magic-tokens", "abc") assert.NoError(t, err) + assert.NotNil(t, result) mdi.AssertExpectations(t) } -func TestGetTokenPoolNotFound(t *testing.T) { +func TestGetTokenPoolByLocatorBadPlugin(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() + am.tokens = make(map[string]tokens.Plugin) mdi := am.database.(*databasemocks.Plugin) - mdi.On("GetTokenPool", context.Background(), "ns1", "abc").Return(nil, nil) - _, err := am.GetTokenPool(context.Background(), "magic-tokens", "abc") - assert.Regexp(t, "FF10109", err) + _, err := am.GetTokenPoolByLocator(context.Background(), "magic-tokens", "abc") + assert.Regexp(t, "FF10272", 
err) mdi.AssertExpectations(t) } -func TestGetTokenPoolFailed(t *testing.T) { +func TestGetTokenPoolByLocatorCached(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() mdi := am.database.(*databasemocks.Plugin) - mdi.On("GetTokenPool", context.Background(), "ns1", "abc").Return(nil, fmt.Errorf("pop")) - _, err := am.GetTokenPool(context.Background(), "magic-tokens", "abc") - assert.Regexp(t, "pop", err) + am.cache.Set("ns=ns1,connector=magic-tokens,poollocator=abc", &core.TokenPool{}) + _, err := am.GetTokenPoolByLocator(context.Background(), "magic-tokens", "abc") + assert.NoError(t, err) mdi.AssertExpectations(t) } -func TestGetTokenPoolBadPlugin(t *testing.T) { +func TestGetTokenPoolByLocatorNotFound(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() - _, err := am.GetTokenPool(context.Background(), "", "") - assert.Regexp(t, "FF10272", err) + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPools", context.Background(), "ns1", mock.Anything).Return([]*core.TokenPool{}, nil, nil) + result, err := am.GetTokenPoolByLocator(context.Background(), "magic-tokens", "abc") + assert.NoError(t, err) + assert.Nil(t, result) + + mdi.AssertExpectations(t) } -func TestGetTokenPoolBadName(t *testing.T) { +func TestGetTokenPoolByLocatorFail(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() - _, err := am.GetTokenPool(context.Background(), "magic-tokens", "") - assert.Regexp(t, "FF00140", err) + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPools", context.Background(), "ns1", mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + _, err := am.GetTokenPoolByLocator(context.Background(), "magic-tokens", "abc") + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) } func TestGetTokenPoolByID(t *testing.T) { @@ -810,6 +870,20 @@ func TestGetTokenPoolByID(t *testing.T) { mdi.AssertExpectations(t) } +func TestGetTokenPoolByIDCached(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + u := fftypes.NewUUID() + mdi := am.database.(*databasemocks.Plugin) + + am.cache.Set(fmt.Sprintf("ns=ns1,poolnameorid=%s", u.String()), &core.TokenPool{}) + _, err := am.GetTokenPoolByNameOrID(context.Background(), u.String()) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + func TestGetTokenPoolByIDBadNamespace(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() @@ -917,3 +991,179 @@ func TestResolvePoolMethods(t *testing.T) { mcm.AssertExpectations(t) mti.AssertExpectations(t) } + +func TestDeletePoolNotFound(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(nil, nil) + + err := am.DeleteTokenPool(context.Background(), "pool1") + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestDeletePoolFail(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Connector: "magic-tokens", + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + mdi.On("DeleteTokenPool", context.Background(), "ns1", pool.ID).Return(fmt.Errorf("pop")) + + err := am.DeleteTokenPool(context.Background(), "pool1") + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestDeletePoolPublished(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + pool := 
&core.TokenPool{ + ID: fftypes.NewUUID(), + Connector: "magic-tokens", + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + Published: true, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + + err := am.DeleteTokenPool(context.Background(), "pool1") + assert.Regexp(t, "FF10449", err) + + mdi.AssertExpectations(t) +} + +func TestDeletePoolTransfersFail(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Connector: "magic-tokens", + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + mdi.On("DeleteTokenPool", context.Background(), "ns1", pool.ID).Return(nil) + mdi.On("DeleteTokenTransfers", context.Background(), "ns1", pool.ID).Return(fmt.Errorf("pop")) + + err := am.DeleteTokenPool(context.Background(), "pool1") + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestDeletePoolApprovalsFail(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Connector: "magic-tokens", + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + mdi.On("DeleteTokenPool", context.Background(), "ns1", pool.ID).Return(nil) + mdi.On("DeleteTokenTransfers", context.Background(), "ns1", pool.ID).Return(nil) + mdi.On("DeleteTokenApprovals", context.Background(), "ns1", pool.ID).Return(fmt.Errorf("pop")) + + err := am.DeleteTokenPool(context.Background(), "pool1") + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestDeletePoolBalancesFail(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Connector: "magic-tokens", + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + mdi.On("DeleteTokenPool", context.Background(), "ns1", pool.ID).Return(nil) + mdi.On("DeleteTokenTransfers", context.Background(), "ns1", pool.ID).Return(nil) + mdi.On("DeleteTokenApprovals", context.Background(), "ns1", pool.ID).Return(nil) + mdi.On("DeleteTokenBalances", context.Background(), "ns1", pool.ID).Return(fmt.Errorf("pop")) + + err := am.DeleteTokenPool(context.Background(), "pool1") + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestDeletePoolSuccess(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Connector: "magic-tokens", + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + mdi.On("DeleteTokenPool", context.Background(), "ns1", pool.ID).Return(nil) + mdi.On("DeleteTokenTransfers", context.Background(), "ns1", pool.ID).Return(nil) + mdi.On("DeleteTokenApprovals", context.Background(), "ns1", pool.ID).Return(nil) + mdi.On("DeleteTokenBalances", context.Background(), "ns1", pool.ID).Return(nil) + + mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) + mti.On("DeactivateTokenPool", context.Background(), 
pool).Return(nil) + + err := am.DeleteTokenPool(context.Background(), "pool1") + assert.NoError(t, err) + + mdi.AssertExpectations(t) + mti.AssertExpectations(t) +} + +func TestDeletePoolBadPlugin(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Connector: "BAD", + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + + err := am.DeleteTokenPool(context.Background(), "pool1") + assert.Regexp(t, "FF10272", err) + + mdi.AssertExpectations(t) +} diff --git a/internal/assets/token_transfer.go b/internal/assets/token_transfer.go index 2dc1c64628..0d26c1c06a 100644 --- a/internal/assets/token_transfer.go +++ b/internal/assets/token_transfer.go @@ -43,18 +43,20 @@ func (am *assetManager) GetTokenTransferByID(ctx context.Context, id string) (*c func (am *assetManager) NewTransfer(transfer *core.TokenTransferInput) syncasync.Sender { sender := &transferSender{ - mgr: am, - transfer: transfer, + mgr: am, + transfer: transfer, + idempotentSubmit: transfer.IdempotencyKey != "", } sender.setDefaults() return sender } type transferSender struct { - mgr *assetManager - transfer *core.TokenTransferInput - resolved bool - msgSender syncasync.Sender + mgr *assetManager + transfer *core.TokenTransferInput + resolved bool + msgSender syncasync.Sender + idempotentSubmit bool } // sendMethod is the specific operation requested of the transferSender. @@ -101,8 +103,8 @@ func (am *assetManager) validateTransfer(ctx context.Context, transfer *core.Tok transfer.TokenTransfer.Pool = pool.ID transfer.TokenTransfer.Connector = pool.Connector - if pool.State != core.TokenPoolStateConfirmed { - return nil, i18n.NewError(ctx, coremsgs.MsgTokenPoolNotConfirmed) + if !pool.Active { + return nil, i18n.NewError(ctx, coremsgs.MsgTokenPoolNotActive) } if transfer.Key, err = am.identity.ResolveInputSigningKey(ctx, transfer.Key, am.keyNormalization); err != nil { return nil, err @@ -193,17 +195,29 @@ func (s *transferSender) resolve(ctx context.Context) (opResubmitted bool, err e if err != nil { // Check if we've clashed on idempotency key. There might be operations still in "Initialized" state that need // submitting to their handlers. 
Note that we'll return the result of resubmitting the operation, not a 409 Conflict error + resubmitWholeTX := false if idemErr, ok := err.(*sqlcommon.IdempotencyError); ok { - operation, resubmitErr := s.mgr.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) + total, resubmitted, resubmitErr := s.mgr.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) if resubmitErr != nil { // Error doing resubmit, return the new error err = resubmitErr - } else if operation != nil { - // We successfully resubmitted an initialized operation, return 2xx not 409 + } + if total == 0 { + // We didn't do anything last time - just start again + txid = idemErr.ExistingTXID + resubmitWholeTX = true + err = nil + } else if len(resubmitted) > 0 { + // We resubmitted something - translate the status code to 200 (true return) + s.transfer.TX.ID = idemErr.ExistingTXID + s.transfer.TX.Type = core.TransactionTypeTokenTransfer return true, nil } + + } + if !resubmitWholeTX { + return false, err } - return true, err } s.transfer.TX.ID = txid s.transfer.TX.Type = core.TransactionTypeTokenTransfer @@ -280,7 +294,7 @@ func (s *transferSender) sendInternal(ctx context.Context, method sendMethod) (e } } - _, err = s.mgr.operations.RunOperation(ctx, opTransfer(op, pool, &s.transfer.TokenTransfer)) + _, err = s.mgr.operations.RunOperation(ctx, opTransfer(op, pool, &s.transfer.TokenTransfer), s.idempotentSubmit) return err } diff --git a/internal/assets/token_transfer_test.go b/internal/assets/token_transfer_test.go index a7f2cc3aad..cc900f4bd1 100644 --- a/internal/assets/token_transfer_test.go +++ b/internal/assets/token_transfer_test.go @@ -87,7 +87,7 @@ func TestMintTokensSuccess(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -101,7 +101,7 @@ func TestMintTokensSuccess(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &mint.TokenTransfer - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.MintTokens(context.Background(), mint, false) assert.NoError(t, err) @@ -132,7 +132,46 @@ func TestMintTokensIdempotentResubmit(t *testing.T) { mth.On("SubmitNewTransaction", context.Background(), core.TransactionTypeTokenTransfer, core.IdempotencyKey("idem1")).Return(id, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(op, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1, []*core.Operation{op}, nil) + + // If ResubmitOperations returns an operation it's because it found one to resubmit, so we return 2xx not 409, and don't expect an error + _, err := am.MintTokens(context.Background(), mint, false) + assert.NoError(t, err) + + mth.AssertExpectations(t) + mom.AssertExpectations(t) +} + +func TestMintTokensIdempotentResubmitAll(t *testing.T) { + am, cancel := newTestAssetsWithMetrics(t) + defer cancel() + var id = fftypes.NewUUID() + + mint := &core.TokenTransferInput{ + TokenTransfer: core.TokenTransfer{ + Amount: *fftypes.NewFFBigInt(5), + }, + Pool: "pool1", + IdempotencyKey: "idem1", + } + pool := &core.TokenPool{ + Name: "pool1", + Connector: "magic-tokens", + Active: true, + } + + mdi := 
am.database.(*databasemocks.Plugin) + mim := am.identity.(*identitymanagermocks.Manager) + mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) + mth.On("SubmitNewTransaction", context.Background(), core.TransactionTypeTokenTransfer, core.IdempotencyKey("idem1")).Return(id, &sqlcommon.IdempotencyError{ + ExistingTXID: id, + OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) + mom.On("ResubmitOperations", context.Background(), id).Return(0, nil, nil) + mim.On("ResolveInputSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + mom.On("AddOrReuseOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.Anything, true).Return(nil, nil) // If ResubmitOperations returns an operation it's because it found one to resubmit, so we return 2xx not 409, and don't expect an error _, err := am.MintTokens(context.Background(), mint, false) @@ -160,7 +199,7 @@ func TestMintTokensIdempotentNoOperationToResubmit(t *testing.T) { mth.On("SubmitNewTransaction", context.Background(), core.TransactionTypeTokenTransfer, core.IdempotencyKey("idem1")).Return(id, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1 /* total */, nil /* to resumit */, nil) // If ResubmitOperations returns nil it's because there was no operation in initialized state, so we expect the regular 409 error back _, err := am.MintTokens(context.Background(), mint, false) @@ -189,7 +228,7 @@ func TestMintTokensIdempotentErrorOnResubmit(t *testing.T) { mth.On("SubmitNewTransaction", context.Background(), core.TransactionTypeTokenTransfer, core.IdempotencyKey("idem1")).Return(id, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, fmt.Errorf("pop")) + mom.On("ResubmitOperations", context.Background(), id).Return(-1, nil, fmt.Errorf("pop")) // If ResubmitOperations returns nil it's because there was no operation in initialized state, so we expect the regular 409 error back _, err := am.MintTokens(context.Background(), mint, false) @@ -213,7 +252,7 @@ func TestMintTokensBadConnector(t *testing.T) { } pool := &core.TokenPool{ Connector: "bad", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -253,7 +292,7 @@ func TestMintTokenDefaultPoolSuccess(t *testing.T) { { Name: "pool1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, }, } totalCount := int64(1) @@ -270,7 +309,7 @@ func TestMintTokenDefaultPoolSuccess(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == tokenPools[0] && data.Transfer == &mint.TokenTransfer - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.MintTokens(context.Background(), mint, false) assert.NoError(t, err) @@ -416,7 +455,7 @@ func TestMintTokensIdentityFail(t *testing.T) { 
} pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -447,7 +486,7 @@ func TestMintTokensFail(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -461,7 +500,7 @@ func TestMintTokensFail(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &mint.TokenTransfer - })).Return(nil, fmt.Errorf("pop")) + }), true).Return(nil, fmt.Errorf("pop")) _, err := am.MintTokens(context.Background(), mint, false) assert.EqualError(t, err, "pop") @@ -486,7 +525,7 @@ func TestMintTokensOperationFail(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -520,7 +559,7 @@ func TestMintTokensConfirm(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -541,7 +580,7 @@ func TestMintTokensConfirm(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &mint.TokenTransfer - })).Return(nil, fmt.Errorf("pop")) + }), true).Return(nil, fmt.Errorf("pop")) _, err := am.MintTokens(context.Background(), mint, true) assert.NoError(t, err) @@ -564,7 +603,7 @@ func TestBurnTokensSuccess(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -578,7 +617,7 @@ func TestBurnTokensSuccess(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &burn.TokenTransfer - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.BurnTokens(context.Background(), burn, false) assert.NoError(t, err) @@ -602,7 +641,7 @@ func TestBurnTokensIdentityFail(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -633,7 +672,7 @@ func TestBurnTokensConfirm(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -654,7 +693,7 @@ func TestBurnTokensConfirm(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &burn.TokenTransfer - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.BurnTokens(context.Background(), burn, true) assert.NoError(t, err) @@ -680,7 +719,7 @@ func TestTransferTokensSuccess(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -694,7 +733,7 @@ func TestTransferTokensSuccess(t *testing.T) { 
mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TransferTokens(context.Background(), transfer, false) assert.NoError(t, err) @@ -721,7 +760,7 @@ func TestTransferTokensUnconfirmedPool(t *testing.T) { pool := &core.TokenPool{ Locator: "F1", Connector: "magic-tokens", - State: core.TokenPoolStatePending, + Active: false, } mdi := am.database.(*databasemocks.Plugin) @@ -751,7 +790,7 @@ func TestTransferTokensIdentityFail(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -779,7 +818,7 @@ func TestTransferTokensNoFromOrTo(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -850,7 +889,7 @@ func TestTransferTokensWithBroadcastMessage(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -869,7 +908,7 @@ func TestTransferTokensWithBroadcastMessage(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TransferTokens(context.Background(), transfer, false) assert.NoError(t, err) @@ -953,7 +992,7 @@ func TestTransferTokensWithBroadcastMessageSendFail(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -1050,7 +1089,7 @@ func TestTransferTokensWithPrivateMessage(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -1069,7 +1108,7 @@ func TestTransferTokensWithPrivateMessage(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TransferTokens(context.Background(), transfer, false) assert.NoError(t, err) @@ -1174,7 +1213,7 @@ func TestTransferTokensConfirm(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -1195,7 +1234,7 @@ func TestTransferTokensConfirm(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TransferTokens(context.Background(), transfer, true) assert.NoError(t, err) @@ -1237,7 +1276,7 @@ func TestTransferTokensWithBroadcastConfirm(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + 
Active: true, } mdi := am.database.(*databasemocks.Plugin) @@ -1269,7 +1308,7 @@ func TestTransferTokensWithBroadcastConfirm(t *testing.T) { mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferData) return op.Type == core.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := am.TransferTokens(context.Background(), transfer, true) assert.NoError(t, err) @@ -1326,7 +1365,7 @@ func TestTransferPrepare(t *testing.T) { } pool := &core.TokenPool{ Connector: "magic-tokens", - State: core.TokenPoolStateConfirmed, + Active: true, } sender := am.NewTransfer(transfer) diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index 7517cd29e5..18d556906d 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -137,8 +137,8 @@ type dispatcher struct { options DispatcherOptions } -func (bm *batchManager) getProcessorKey(identity *core.SignerRef, groupID *fftypes.Bytes32) string { - return fmt.Sprintf("%s|%s|%v", identity.Author, identity.Key, groupID) +func (bm *batchManager) getProcessorKey(author string, groupID *fftypes.Bytes32) string { + return fmt.Sprintf("%s|%v", author, groupID) } func (bm *batchManager) getDispatcherKey(txType core.TransactionType, msgType core.MessageType) string { @@ -172,7 +172,7 @@ func (bm *batchManager) NewMessages() chan<- int64 { return bm.newMessages } -func (bm *batchManager) getProcessor(txType core.TransactionType, msgType core.MessageType, group *fftypes.Bytes32, signer *core.SignerRef) (*batchProcessor, error) { +func (bm *batchManager) getProcessor(txType core.TransactionType, msgType core.MessageType, group *fftypes.Bytes32, author string) (*batchProcessor, error) { bm.dispatcherMux.Lock() defer bm.dispatcherMux.Unlock() @@ -181,7 +181,7 @@ func (bm *batchManager) getProcessor(txType core.TransactionType, msgType core.M if !ok { return nil, i18n.NewError(bm.ctx, coremsgs.MsgUnregisteredBatchType, dispatcherKey) } - name := bm.getProcessorKey(signer, group) + name := bm.getProcessorKey(author, group) processor, ok := dispatcher.processors[name] if !ok { processor = newBatchProcessor( @@ -191,7 +191,7 @@ func (bm *batchManager) getProcessor(txType core.TransactionType, msgType core.M name: name, txType: txType, dispatcherName: dispatcher.name, - signer: *signer, + author: author, group: group, dispatch: dispatcher.handler, }, @@ -320,7 +320,7 @@ func (bm *batchManager) messageSequencer() { // the database store. Meaning we cannot rely on the sequence having been set. 
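The batch manager refactor above drops the signing key from the processor lookup: processors are now keyed on author and group only, so messages from one author land on the same processor regardless of which key signed them, and the batch's signing key is resolved later when the batch is sealed (see initPayload further down, which copies the key from the first flushed message). A minimal sketch of that lookup shape, using simplified placeholder types rather than the real FireFly batch manager API:

package sketch

import "fmt"

// processor is a stand-in for the real batch processor; only the identity
// fields relevant to this sketch are modelled.
type processor struct {
	author string
	group  string
}

// getProcessor keys the processor map on author and group only, so messages
// signed with different keys by the same author are assembled together, and
// the batch signing key is chosen at flush time from the first message.
func getProcessor(processors map[string]*processor, author, group string) *processor {
	key := fmt.Sprintf("%s|%v", author, group)
	p, ok := processors[key]
	if !ok {
		p = &processor{author: author, group: group}
		processors[key] = p
	}
	return p
}

Keying this way is what makes the key-based batch splitting in addWork (below) necessary: a single processor can now see messages carrying mixed signing keys, and has to cut the batch whenever the key changes.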
msg.Sequence = entry.Sequence - processor, err := bm.getProcessor(msg.Header.TxType, msg.Header.Type, msg.Header.Group, &msg.Header.SignerRef) + processor, err := bm.getProcessor(msg.Header.TxType, msg.Header.Type, msg.Header.Group, msg.Header.SignerRef.Author) if err != nil { l.Errorf("Failed to dispatch message %s: %s", msg.Header.ID, err) continue diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index 24c5956532..6449617f7b 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -339,7 +339,7 @@ func TestGetInvalidBatchTypeMsg(t *testing.T) { txHelper, _ := txcommon.NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) bm, _ := NewBatchManager(context.Background(), "ns1", mdi, mdm, mim, txHelper) defer bm.Close() - _, err := bm.(*batchManager).getProcessor(core.BatchTypeBroadcast, "wrong", nil, &core.SignerRef{}) + _, err := bm.(*batchManager).getProcessor(core.BatchTypeBroadcast, "wrong", nil, "") assert.Regexp(t, "FF10126", err) } diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index ee274850de..b435e5279a 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -43,7 +43,7 @@ type batchProcessorConf struct { name string dispatcherName string txType core.TransactionType - signer core.SignerRef + author string group *fftypes.Bytes32 dispatch DispatchHandler } @@ -175,31 +175,40 @@ func (bp *batchProcessor) addWork(newWork *batchWork) (full, overflow bool) { log.L(bp.ctx).Warnf("Adding message to a new batch when one was already assigned. Old batch %s is likely abandoned.", newWork.msg.BatchID) } + // Check for conditions that prevent this piece of work from going into the current batch + // (i.e. the new work is specifically assigned a separate transaction or signing key) + batchOfOne := bp.conf.txType == core.TransactionTypeContractInvokePin + if batchOfOne { + full = true + overflow = len(bp.assemblyQueue) > 0 + } else if len(bp.assemblyQueue) > 0 { + full = newWork.msg.Header.Key != bp.assemblyQueue[0].msg.Header.Key + overflow = true + } + // Build the new sorted work list - for _, work := range bp.assemblyQueue { - if !added && newWork.msg.Sequence < work.msg.Sequence { + if full { + bp.assemblyQueue = append(bp.assemblyQueue, newWork) + } else { + for _, work := range bp.assemblyQueue { + if !added && newWork.msg.Sequence < work.msg.Sequence { + newQueue = append(newQueue, newWork) + added = true + } + newQueue = append(newQueue, work) + } + if !added { newQueue = append(newQueue, newWork) - added = true } - newQueue = append(newQueue, work) - } - if !added { - newQueue = append(newQueue, newWork) - } - log.L(bp.ctx).Debugf("Added message %s sequence=%d to in-flight batch assembly %s", newWork.msg.Header.ID, newWork.msg.Sequence, bp.assemblyID) - bp.assemblyQueueBytes += newWork.estimateSize() - bp.assemblyQueue = newQueue + bp.assemblyQueueBytes += newWork.estimateSize() + bp.assemblyQueue = newQueue - batchOfOne := bp.conf.txType == core.TransactionTypeContractInvokePin - if batchOfOne { - // Special handling for processors that allow only one message per batch - full = true - overflow = len(bp.assemblyQueue) > 1 - } else { - full = len(bp.assemblyQueue) >= int(bp.conf.BatchMaxSize) || (bp.assemblyQueueBytes >= bp.conf.BatchMaxBytes) - overflow = len(bp.assemblyQueue) > 1 && (bp.assemblyQueueBytes > bp.conf.BatchMaxBytes) + full = len(bp.assemblyQueue) >= int(bp.conf.BatchMaxSize) || bp.assemblyQueueBytes >= bp.conf.BatchMaxBytes + 
overflow = len(bp.assemblyQueue) > 1 && (batchOfOne || bp.assemblyQueueBytes > bp.conf.BatchMaxBytes) } + + log.L(bp.ctx).Debugf("Added message %s sequence=%d to in-flight batch assembly %s", newWork.msg.Header.ID, newWork.msg.Sequence, bp.assemblyID) return full, overflow } @@ -390,9 +399,12 @@ func (bp *batchProcessor) initPayload(id *fftypes.UUID, flushWork []*batchWork) ID: id, Type: bp.conf.DispatcherOptions.BatchType, Namespace: bp.bm.namespace, - SignerRef: bp.conf.signer, - Group: bp.conf.group, - Created: fftypes.Now(), + SignerRef: core.SignerRef{ + Author: bp.conf.author, + Key: flushWork[0].msg.Header.Key, + }, + Group: bp.conf.group, + Created: fftypes.Now(), }, }, } diff --git a/internal/batch/batch_processor_test.go b/internal/batch/batch_processor_test.go index 997cf60dd0..39426c4efb 100644 --- a/internal/batch/batch_processor_test.go +++ b/internal/batch/batch_processor_test.go @@ -48,7 +48,7 @@ func newTestBatchProcessor(t *testing.T, dispatch DispatchHandler) (func(), *dat txHelper, _ := txcommon.NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) bp := newBatchProcessor(bm, &batchProcessorConf{ txType: core.TransactionTypeBatchPin, - signer: core.SignerRef{Author: "did:firefly:org/abcd", Key: "0x12345"}, + author: "did:firefly:org/abcd", dispatch: dispatch, DispatcherOptions: DispatcherOptions{ BatchMaxSize: 10, @@ -423,6 +423,62 @@ func TestAddWorkBatchOfOne(t *testing.T) { }, bp.assemblyQueue) } +func TestAddWorkDifferentKeys(t *testing.T) { + cancel, _, bp := newTestBatchProcessor(t, func(c context.Context, state *DispatchPayload) error { + return nil + }) + defer cancel() + + msg1 := &core.Message{ + Sequence: 200, + Header: core.MessageHeader{ + SignerRef: core.SignerRef{ + Key: "0x1", + }, + }, + } + msg2 := &core.Message{ + Sequence: 201, + Header: core.MessageHeader{ + SignerRef: core.SignerRef{ + Key: "0x2", + }, + }, + } + msg3 := &core.Message{ + Sequence: 202, + Header: core.MessageHeader{ + SignerRef: core.SignerRef{ + Key: "0x1", + }, + }, + } + + full, overflow := bp.addWork(&batchWork{msg: msg1}) + assert.False(t, full) + assert.False(t, overflow) + assert.Equal(t, []*batchWork{ + {msg: msg1}, + }, bp.assemblyQueue) + + full, overflow = bp.addWork(&batchWork{msg: msg3}) + assert.False(t, full) + assert.False(t, overflow) + assert.Equal(t, []*batchWork{ + {msg: msg1}, + {msg: msg3}, + }, bp.assemblyQueue) + + full, overflow = bp.addWork(&batchWork{msg: msg2}) + assert.True(t, full) + assert.True(t, overflow) + assert.Equal(t, []*batchWork{ + {msg: msg1}, + {msg: msg3}, + {msg: msg2}, + }, bp.assemblyQueue) +} + func TestAddWorkAbandonedBatch(t *testing.T) { cancel, _, bp := newTestBatchProcessor(t, func(c context.Context, state *DispatchPayload) error { return nil diff --git a/internal/blockchain/bifactory/factory.go b/internal/blockchain/bifactory/factory.go index 6c5289dc64..7171210be7 100644 --- a/internal/blockchain/bifactory/factory.go +++ b/internal/blockchain/bifactory/factory.go @@ -23,6 +23,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly/internal/blockchain/ethereum" "github.com/hyperledger/firefly/internal/blockchain/fabric" + "github.com/hyperledger/firefly/internal/blockchain/tezos" "github.com/hyperledger/firefly/internal/coreconfig" "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/pkg/blockchain" @@ -31,6 +32,7 @@ import ( var pluginsByType = map[string]func() blockchain.Plugin{ (*ethereum.Ethereum)(nil).Name(): func() blockchain.Plugin { return ðereum.Ethereum{} }, 
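The reworked addWork above decides up front whether an incoming message can join the current assembly: a contract-invoke-pin transaction always forms a batch of one, and a message signed with a different key from the head of the queue forces the current batch to flush first. A standalone sketch of that decision, under the simplifying assumption that only the transaction type and signing key matter (the real code also applies the size and byte limits afterwards):

package batchsketch

// workItem is a stand-in for a queued message: only the fields the
// split decision needs are modelled here.
type workItem struct {
	txType string // e.g. "batch_pin" or "contract_invoke_pin"
	key    string // signing key of the message
}

// shouldSplit mirrors the shape of the up-front check in addWork: a
// contract-invoke-pin is always a batch of one, and a change of signing
// key cannot share a batch with the messages already queued.
func shouldSplit(queue []workItem, incoming workItem) (full, overflow bool) {
	batchOfOne := incoming.txType == "contract_invoke_pin"
	switch {
	case batchOfOne:
		// The incoming message fills its own batch; anything already queued overflows.
		return true, len(queue) > 0
	case len(queue) > 0 && queue[0].key != incoming.key:
		// Different signing key: close the current batch and start a new one.
		return true, true
	default:
		return false, false
	}
}

This is the behaviour exercised by TestAddWorkDifferentKeys above: the two messages signed with 0x1 accumulate in the same assembly, and the message signed with 0x2 marks it full and overflowing.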
(*fabric.Fabric)(nil).Name(): func() blockchain.Plugin { return &fabric.Fabric{} }, + (*tezos.Tezos)(nil).Name(): func() blockchain.Plugin { return &tezos.Tezos{} }, } func InitConfig(config config.ArraySection) { diff --git a/internal/blockchain/common/common.go b/internal/blockchain/common/common.go index b692bcfc1c..d15e0b8c4a 100644 --- a/internal/blockchain/common/common.go +++ b/internal/blockchain/common/common.go @@ -21,9 +21,12 @@ import ( "encoding/hex" "encoding/json" "fmt" + "net/http" "strings" "sync" + "github.com/go-resty/resty/v2" + "github.com/hyperledger/firefly-common/pkg/ffresty" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly-common/pkg/log" @@ -32,13 +35,21 @@ import ( "github.com/hyperledger/firefly/pkg/core" ) +// EventsToDispatch is a by-namespace map of ordered blockchain events. +// Note there are some old listeners that do not have a namespace on them, and hence are stored under the empty string, and dispatched to all namespaces. +type EventsToDispatch map[string][]*blockchain.EventToDispatch + type BlockchainCallbacks interface { SetHandler(namespace string, handler blockchain.Callbacks) SetOperationalHandler(namespace string, handler core.OperationCallbacks) OperationUpdate(ctx context.Context, plugin core.Named, nsOpID string, status core.OpStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject) - BatchPinOrNetworkAction(ctx context.Context, subInfo *SubscriptionInfo, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef, params *BatchPinParams) error - BlockchainEvent(ctx context.Context, namespace string, event *blockchain.EventWithSubscription) error + // Common logic for parsing a BatchPinOrNetworkAction event, and if not discarded to add it to the by-namespace map + PrepareBatchPinOrNetworkAction(ctx context.Context, events EventsToDispatch, subInfo *SubscriptionInfo, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef, params *BatchPinParams) + // Common logic for parsing a BatchPinOrNetworkAction event, and if not discarded to add it to the by-namespace map + PrepareBlockchainEvent(ctx context.Context, events EventsToDispatch, namespace string, event *blockchain.EventForListener) + // Dispatch logic, that ensures all the right namespace callbacks get called for the event batch + DispatchBlockchainEvents(ctx context.Context, events EventsToDispatch) error } type FireflySubscriptions interface { @@ -48,7 +59,7 @@ type FireflySubscriptions interface { } type callbacks struct { - writeLock sync.Mutex + lock sync.RWMutex handlers map[string]blockchain.Callbacks opHandlers map[string]core.OperationCallbacks } @@ -88,6 +99,24 @@ type BlockchainReceiptNotification struct { ContractLocation *fftypes.JSONAny `json:"contractLocation,omitempty"` } +type BlockchainRESTError struct { + Error string `json:"error,omitempty"` + // See https://github.com/hyperledger/firefly-transaction-manager/blob/main/pkg/ffcapi/submission_error.go + SubmissionRejected bool `json:"submissionRejected,omitempty"` +} + +type conflictError struct { + err error +} + +func (ce *conflictError) Error() string { + return ce.err.Error() +} + +func (ce *conflictError) IsConflictError() bool { + return true +} + func NewBlockchainCallbacks() BlockchainCallbacks { return &callbacks{ handlers: make(map[string]blockchain.Callbacks), @@ -102,15 +131,23 @@ func NewFireflySubscriptions() FireflySubscriptions { } func (cb *callbacks) 
SetHandler(namespace string, handler blockchain.Callbacks) { - cb.writeLock.Lock() - defer cb.writeLock.Unlock() - cb.handlers[namespace] = handler + cb.lock.Lock() + defer cb.lock.Unlock() + if handler == nil { + delete(cb.handlers, namespace) + } else { + cb.handlers[namespace] = handler + } } func (cb *callbacks) SetOperationalHandler(namespace string, handler core.OperationCallbacks) { - cb.writeLock.Lock() - defer cb.writeLock.Unlock() - cb.opHandlers[namespace] = handler + cb.lock.Lock() + defer cb.lock.Unlock() + if handler == nil { + delete(cb.opHandlers, namespace) + } else { + cb.opHandlers[namespace] = handler + } } func (cb *callbacks) OperationUpdate(ctx context.Context, plugin core.Named, nsOpID string, status core.OpStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject) { @@ -129,7 +166,7 @@ func (cb *callbacks) OperationUpdate(ctx context.Context, plugin core.Named, nsO log.L(ctx).Errorf("No handler found for blockchain operation '%s'", nsOpID) } -func (cb *callbacks) BatchPinOrNetworkAction(ctx context.Context, subInfo *SubscriptionInfo, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef, params *BatchPinParams) error { +func (cb *callbacks) PrepareBatchPinOrNetworkAction(ctx context.Context, events EventsToDispatch, subInfo *SubscriptionInfo, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef, params *BatchPinParams) { // Check if this is actually an operator action if len(params.Contexts) == 0 && strings.HasPrefix(params.NsOrAction, blockchain.FireFlyActionPrefix) { action := params.NsOrAction[len(blockchain.FireFlyActionPrefix):] @@ -141,14 +178,16 @@ func (cb *callbacks) BatchPinOrNetworkAction(ctx context.Context, subInfo *Subsc for _, localNames := range subInfo.V1Namespace { namespaces = append(namespaces, localNames...) } - return cb.networkAction(ctx, namespaces, action, location, event, signingKey) + cb.addNetworkAction(ctx, events, namespaces, action, location, event, signingKey) + return } - return cb.networkAction(ctx, []string{subInfo.V2Namespace}, action, location, event, signingKey) + cb.addNetworkAction(ctx, events, []string{subInfo.V2Namespace}, action, location, event, signingKey) + return } batch, err := buildBatchPin(ctx, event, params) if err != nil { - return nil // move on + return // move on } // For V1 of the FireFly contract, namespace is passed explicitly, but needs to be mapped to local name(s). 
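SetHandler and SetOperationalHandler above now treat a nil handler as de-registration, and the callbacks struct guards its maps with a read/write mutex so the dispatch path only ever takes read locks. A small self-contained sketch of that registry pattern (names are illustrative, not the FireFly API), presumably used when a namespace is being torn down or reloaded:

package registrysketch

import "sync"

// handler is a placeholder for the per-namespace callback interface.
type handler interface{ Name() string }

type registry struct {
	lock     sync.RWMutex
	handlers map[string]handler
}

// set registers a handler for a namespace, or removes the registration
// entirely when nil is passed.
func (r *registry) set(namespace string, h handler) {
	r.lock.Lock()
	defer r.lock.Unlock()
	if h == nil {
		delete(r.handlers, namespace)
		return
	}
	r.handlers[namespace] = h
}

// get is used on the dispatch path, which only needs a read lock.
func (r *registry) get(namespace string) (handler, bool) {
	r.lock.RLock()
	defer r.lock.RUnlock()
	h, ok := r.handlers[namespace]
	return h, ok
}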
@@ -158,9 +197,10 @@ func (cb *callbacks) BatchPinOrNetworkAction(ctx context.Context, subInfo *Subsc namespaces := subInfo.V1Namespace[networkNamespace] if len(namespaces) == 0 { log.L(ctx).Errorf("No handler found for blockchain batch pin on network namespace '%s'", networkNamespace) - return nil + return } - return cb.batchPinComplete(ctx, namespaces, batch, signingKey) + cb.addBatchPinComplete(ctx, events, namespaces, batch, signingKey) + return } batch.TransactionType = core.TransactionTypeBatchPin if strings.HasPrefix(params.NsOrAction, blockchain.FireFlyActionPrefix) { @@ -169,48 +209,82 @@ func (cb *callbacks) BatchPinOrNetworkAction(ctx context.Context, subInfo *Subsc batch.TransactionType = core.TransactionTypeContractInvokePin } } - return cb.batchPinComplete(ctx, []string{subInfo.V2Namespace}, batch, signingKey) + cb.addBatchPinComplete(ctx, events, []string{subInfo.V2Namespace}, batch, signingKey) } -func (cb *callbacks) batchPinComplete(ctx context.Context, namespaces []string, batch *blockchain.BatchPin, signingKey *core.VerifierRef) error { +func (cb *callbacks) addBatchPinComplete(ctx context.Context, events EventsToDispatch, namespaces []string, batch *blockchain.BatchPin, signingKey *core.VerifierRef) { + cb.lock.RLock() + defer cb.lock.RUnlock() for _, namespace := range namespaces { - if handler, ok := cb.handlers[namespace]; ok { - if err := handler.BatchPinComplete(namespace, batch, signingKey); err != nil { - return err - } + if _, ok := cb.handlers[namespace]; ok { + events[namespace] = append(events[namespace], &blockchain.EventToDispatch{ + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: namespace, + Batch: batch, + SigningKey: signingKey, + }, + }) } else { log.L(ctx).Errorf("No handler found for blockchain batch pin on local namespace '%s'", namespace) } } - return nil } -func (cb *callbacks) networkAction(ctx context.Context, namespaces []string, action string, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef) error { +func (cb *callbacks) addNetworkAction(ctx context.Context, events EventsToDispatch, namespaces []string, action string, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef) { + cb.lock.RLock() + defer cb.lock.RUnlock() for _, namespace := range namespaces { - if handler, ok := cb.handlers[namespace]; ok { - if err := handler.BlockchainNetworkAction(action, location, event, signingKey); err != nil { - return err - } + if _, ok := cb.handlers[namespace]; ok { + events[namespace] = append(events[namespace], &blockchain.EventToDispatch{ + Type: blockchain.EventTypeNetworkAction, + NetworkAction: &blockchain.NetworkActionEvent{ + Action: action, + Location: location, + Event: event, + SigningKey: signingKey, + }, + }) } else { log.L(ctx).Errorf("No handler found for blockchain network action on local namespace '%s'", namespace) } } - return nil } -func (cb *callbacks) BlockchainEvent(ctx context.Context, namespace string, event *blockchain.EventWithSubscription) error { +func (cb *callbacks) PrepareBlockchainEvent(ctx context.Context, events EventsToDispatch, namespace string, event *blockchain.EventForListener) { + cb.lock.RLock() + defer cb.lock.RUnlock() if namespace == "" { // Older subscriptions don't populate namespace, so deliver the event to every handler - for _, cb := range cb.handlers { - if err := cb.BlockchainEvent(event); err != nil { - return err - } + for namespace := range cb.handlers { + events[namespace] = 
append(events[namespace], &blockchain.EventToDispatch{ + Type: blockchain.EventTypeForListener, + ForListener: event, + }) } } else { + if _, ok := cb.handlers[namespace]; ok { + events[namespace] = append(events[namespace], &blockchain.EventToDispatch{ + Type: blockchain.EventTypeForListener, + ForListener: event, + }) + } else { + log.L(ctx).Errorf("No handler found for blockchain event on namespace '%s'", namespace) + } + } +} + +func (cb *callbacks) DispatchBlockchainEvents(ctx context.Context, events EventsToDispatch) error { + cb.lock.RLock() + defer cb.lock.RUnlock() + // The event batches for each namespace are already built, and ready to dispatch. + // Just run around the handlers dispatching the list of events for each. + for namespace, events := range events { if handler, ok := cb.handlers[namespace]; ok { - return handler.BlockchainEvent(event) + if err := handler.BlockchainEventBatch(events); err != nil { + return err + } } - log.L(ctx).Errorf("No handler found for blockchain event on namespace '%s'", namespace) } return nil } @@ -260,13 +334,24 @@ func buildBatchPin(ctx context.Context, event *blockchain.Event, params *BatchPi } func GetNamespaceFromSubName(subName string) string { - var parts = strings.Split(subName, "-") // Subscription names post version 1.1 are in the format `ff-sub-<namespace>-<listener ID>` - if len(parts) != 4 { - // Assume older subscription and return empty string - return "" + // Prior to that they had the format `ff-sub-<listener ID>` + + // Strip the "ff-sub-" prefix from the beginning of the name + withoutPrefix := strings.TrimPrefix(subName, "ff-sub-") + if len(withoutPrefix) < len(subName) { + // Strip the listener ID from the end of the name + const UUIDLength = 36 + if len(withoutPrefix) > UUIDLength { + uuidSplit := len(withoutPrefix) - UUIDLength - 1 + namespace := withoutPrefix[:uuidSplit] + listenerID := withoutPrefix[uuidSplit:] + if strings.HasPrefix(listenerID, "-") { + return namespace + } + } } - return parts[2] + return "" } func (s *subscriptions) AddSubscription(ctx context.Context, namespace *core.Namespace, version int, subID string, extra interface{}) { @@ -337,3 +422,16 @@ func HandleReceipt(ctx context.Context, plugin core.Named, reply *BlockchainRece return nil } + +func WrapRESTError(ctx context.Context, errRes *BlockchainRESTError, res *resty.Response, err error, defMsgKey i18n.ErrorMessageKey) error { + if errRes != nil && errRes.Error != "" { + if res != nil && res.StatusCode() == http.StatusConflict { + return &conflictError{err: i18n.WrapError(ctx, err, coremsgs.MsgBlockchainConnectorRESTErrConflict, errRes.Error)} + } + return i18n.WrapError(ctx, err, defMsgKey, errRes.Error) + } + if res != nil && res.StatusCode() == http.StatusConflict { + return &conflictError{err: ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgBlockchainConnectorRESTErrConflict)} + } + return ffresty.WrapRestErr(ctx, res, err, defMsgKey) +} diff --git a/internal/blockchain/common/common_test.go b/internal/blockchain/common/common_test.go index 92fab73d04..402a40339d 100644 --- a/internal/blockchain/common/common_test.go +++ b/internal/blockchain/common/common_test.go @@ -20,9 +20,13 @@ import ( "context" "encoding/json" "fmt" + "net/http" "testing" + "github.com/go-resty/resty/v2" "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/internal/operations" + "github.com/hyperledger/firefly/mocks/blockchainmocks" "github.com/hyperledger/firefly/mocks/coremocks", 
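The callbacks refactor above splits event handling into two phases: the Prepare* calls parse and route each raw connector event into a by-namespace map, and a single DispatchBlockchainEvents call then hands each namespace its ordered slice in one BlockchainEventBatch delivery. A minimal sketch of that two-phase shape, with placeholder event and handler types standing in for the FireFly ones (these are assumptions for illustration, not the real interfaces):

package dispatchsketch

import "fmt"

// event and handler are simplified stand-ins for blockchain.EventToDispatch
// and blockchain.Callbacks in this sketch.
type event struct{ protocolID string }

type handler interface {
	BlockchainEventBatch(batch []*event) error
}

// eventsToDispatch mirrors the by-namespace accumulation used by the plugins:
// events are gathered per namespace first, then delivered in one batch each.
type eventsToDispatch map[string][]*event

// prepare routes one parsed event to its namespace, dropping it if no
// handler is registered for that namespace.
func prepare(events eventsToDispatch, namespace string, ev *event, handlers map[string]handler) {
	if _, ok := handlers[namespace]; ok {
		events[namespace] = append(events[namespace], ev)
	}
}

// dispatch delivers each namespace's accumulated batch exactly once.
func dispatch(events eventsToDispatch, handlers map[string]handler) error {
	for ns, batch := range events {
		if h, ok := handlers[ns]; ok {
			if err := h.BlockchainEventBatch(batch); err != nil {
				return fmt.Errorf("dispatch to %s failed: %w", ns, err)
			}
		}
	}
	return nil
}

Because the dispatch happens once per connector delivery, a single ack or nack can cover everything the connector sent in that websocket batch, rather than acknowledging event by event.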
"github.com/hyperledger/firefly/pkg/blockchain" @@ -55,24 +59,47 @@ func TestCallbackOperationUpdate(t *testing.T) { mcb.AssertExpectations(t) } +func matchBatchWithEvent(protocolID string) interface{} { + return mock.MatchedBy(func(batch []*blockchain.EventToDispatch) bool { + return len(batch) == 1 && + batch[0].Type == blockchain.EventTypeForListener && + batch[0].ForListener.ProtocolID == protocolID + }) +} + func TestCallbackBlockchainEvent(t *testing.T) { - event := &blockchain.EventWithSubscription{} + event := &blockchain.EventForListener{ + Event: &blockchain.Event{ + ProtocolID: "012345", + }, + } mcb := &blockchainmocks.Callbacks{} cb := NewBlockchainCallbacks() cb.SetHandler("ns1", mcb) - mcb.On("BlockchainEvent", event).Return(nil).Once() - err := cb.BlockchainEvent(context.Background(), "ns1", event) + mcb.On("BlockchainEventBatch", matchBatchWithEvent("012345")).Return(nil).Once() + events := make(EventsToDispatch) + cb.PrepareBlockchainEvent(context.Background(), events, "ns1", event) + err := cb.DispatchBlockchainEvents(context.Background(), events) assert.NoError(t, err) - err = cb.BlockchainEvent(context.Background(), "ns2", event) + events = make(EventsToDispatch) + cb.PrepareBlockchainEvent(context.Background(), events, "ns2", event) + err = cb.DispatchBlockchainEvents(context.Background(), events) assert.NoError(t, err) - mcb.On("BlockchainEvent", event).Return(fmt.Errorf("pop")).Once() - err = cb.BlockchainEvent(context.Background(), "", event) + mcb.On("BlockchainEventBatch", matchBatchWithEvent("012345")).Return(fmt.Errorf("pop")).Once() + events = make(EventsToDispatch) + cb.PrepareBlockchainEvent(context.Background(), events, "", event) + err = cb.DispatchBlockchainEvents(context.Background(), events) assert.EqualError(t, err, "pop") + cb.SetHandler("ns1", nil) + assert.Empty(t, cb.(*callbacks).handlers) + cb.SetOperationalHandler("ns1", nil) + assert.Empty(t, cb.(*callbacks).opHandlers) + mcb.AssertExpectations(t) } @@ -89,12 +116,23 @@ func TestCallbackBatchPinBadBatch(t *testing.T) { Version: 2, V2Namespace: "ns1", } - err := cb.BatchPinOrNetworkAction(context.Background(), sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + events := make(EventsToDispatch) + cb.PrepareBatchPinOrNetworkAction(context.Background(), events, sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + err := cb.DispatchBlockchainEvents(context.Background(), events) assert.NoError(t, err) mcb.AssertExpectations(t) } +func matchBatchPinEvent(ns string, txType core.TransactionType) interface{} { + return mock.MatchedBy(func(batch []*blockchain.EventToDispatch) bool { + return len(batch) == 1 && + batch[0].Type == blockchain.EventTypeBatchPinComplete && + batch[0].BatchPinComplete.Namespace == ns && + batch[0].BatchPinComplete.Batch.TransactionType == txType + }) +} + func TestBatchPinContractInvokePin(t *testing.T) { event := &blockchain.Event{} verifier := &core.VerifierRef{} @@ -113,16 +151,15 @@ func TestBatchPinContractInvokePin(t *testing.T) { cb := NewBlockchainCallbacks() cb.SetHandler("ns1", mcb) - mcb.On("BatchPinComplete", "ns1", mock.MatchedBy(func(batchPin *blockchain.BatchPin) bool { - assert.Equal(t, core.TransactionTypeContractInvokePin, batchPin.TransactionType) - return true - }), mock.Anything).Return(nil) + mcb.On("BlockchainEventBatch", matchBatchPinEvent("ns1", core.TransactionTypeContractInvokePin)).Return(nil) sub := &SubscriptionInfo{ Version: 2, V2Namespace: "ns1", } - err := cb.BatchPinOrNetworkAction(context.Background(), sub, fftypes.JSONAnyPtr("{}"), 
event, verifier, params) + events := make(EventsToDispatch) + cb.PrepareBatchPinOrNetworkAction(context.Background(), events, sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + err := cb.DispatchBlockchainEvents(context.Background(), events) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -149,12 +186,16 @@ func TestCallbackBatchPin(t *testing.T) { Version: 2, V2Namespace: "ns1", } - mcb.On("BatchPinComplete", "ns1", mock.Anything, verifier).Return(nil).Once() - err := cb.BatchPinOrNetworkAction(context.Background(), sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + mcb.On("BlockchainEventBatch", matchBatchPinEvent("ns1", core.TransactionTypeBatchPin)).Return(nil).Once() + events := make(EventsToDispatch) + cb.PrepareBatchPinOrNetworkAction(context.Background(), events, sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + err := cb.DispatchBlockchainEvents(context.Background(), events) assert.NoError(t, err) - mcb.On("BatchPinComplete", "ns1", mock.Anything, verifier).Return(fmt.Errorf("pop")).Once() - err = cb.BatchPinOrNetworkAction(context.Background(), sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + mcb.On("BlockchainEventBatch", matchBatchPinEvent("ns1", core.TransactionTypeBatchPin)).Return(fmt.Errorf("pop")).Once() + events = make(EventsToDispatch) + cb.PrepareBatchPinOrNetworkAction(context.Background(), events, sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + err = cb.DispatchBlockchainEvents(context.Background(), events) assert.EqualError(t, err, "pop") sub = &SubscriptionInfo{ @@ -162,20 +203,35 @@ func TestCallbackBatchPin(t *testing.T) { V1Namespace: map[string][]string{"ns2": {"ns1", "ns"}}, } params.NsOrAction = "ns2" - mcb.On("BatchPinComplete", "ns1", mock.Anything, verifier).Return(nil).Once() - err = cb.BatchPinOrNetworkAction(context.Background(), sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + mcb.On("BlockchainEventBatch", matchBatchPinEvent("ns1", "" /* no tx type for V1 */)).Return(nil).Once() + events = make(EventsToDispatch) + cb.PrepareBatchPinOrNetworkAction(context.Background(), events, sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + err = cb.DispatchBlockchainEvents(context.Background(), events) assert.NoError(t, err) params.NsOrAction = "ns3" - err = cb.BatchPinOrNetworkAction(context.Background(), sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + events = make(EventsToDispatch) + cb.PrepareBatchPinOrNetworkAction(context.Background(), events, sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + err = cb.DispatchBlockchainEvents(context.Background(), events) assert.NoError(t, err) mcb.AssertExpectations(t) } +func matchNetworkActionEvent(action string, verifier core.VerifierRef) interface{} { + return mock.MatchedBy(func(batch []*blockchain.EventToDispatch) bool { + return len(batch) == 1 && + batch[0].Type == blockchain.EventTypeNetworkAction && + batch[0].NetworkAction.Action == action && + *batch[0].NetworkAction.SigningKey == verifier + }) +} + func TestCallbackNetworkAction(t *testing.T) { event := &blockchain.Event{} - verifier := &core.VerifierRef{} + verifier := core.VerifierRef{ + Value: "0x12345", + } params := &BatchPinParams{ NsOrAction: "firefly:terminate", } @@ -188,20 +244,26 @@ func TestCallbackNetworkAction(t *testing.T) { Version: 2, V2Namespace: "ns1", } - mcb.On("BlockchainNetworkAction", "terminate", mock.Anything, mock.Anything, verifier).Return(nil).Once() - err := cb.BatchPinOrNetworkAction(context.Background(), sub, fftypes.JSONAnyPtr("{}"), event, verifier, 
params) + mcb.On("BlockchainEventBatch", matchNetworkActionEvent("terminate", verifier)).Return(nil).Once() + events := make(EventsToDispatch) + cb.PrepareBatchPinOrNetworkAction(context.Background(), events, sub, fftypes.JSONAnyPtr("{}"), event, &verifier, params) + err := cb.DispatchBlockchainEvents(context.Background(), events) assert.NoError(t, err) - mcb.On("BlockchainNetworkAction", "terminate", mock.Anything, mock.Anything, verifier).Return(fmt.Errorf("pop")).Once() - err = cb.BatchPinOrNetworkAction(context.Background(), sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + mcb.On("BlockchainEventBatch", matchNetworkActionEvent("terminate", verifier)).Return(fmt.Errorf("pop")).Once() + events = make(EventsToDispatch) + cb.PrepareBatchPinOrNetworkAction(context.Background(), events, sub, fftypes.JSONAnyPtr("{}"), event, &verifier, params) + err = cb.DispatchBlockchainEvents(context.Background(), events) assert.EqualError(t, err, "pop") sub = &SubscriptionInfo{ Version: 1, V1Namespace: map[string][]string{"ns2": {"ns1", "ns"}}, } - mcb.On("BlockchainNetworkAction", "terminate", mock.Anything, mock.Anything, verifier).Return(nil).Once() - err = cb.BatchPinOrNetworkAction(context.Background(), sub, fftypes.JSONAnyPtr("{}"), event, verifier, params) + mcb.On("BlockchainEventBatch", matchNetworkActionEvent("terminate", verifier)).Return(nil).Once() + events = make(EventsToDispatch) + cb.PrepareBatchPinOrNetworkAction(context.Background(), events, sub, fftypes.JSONAnyPtr("{}"), event, &verifier, params) + err = cb.DispatchBlockchainEvents(context.Background(), events) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -231,9 +293,15 @@ func TestBuildBatchPinErrors(t *testing.T) { } func TestGetNamespaceFromSubName(t *testing.T) { - ns := GetNamespaceFromSubName("ff-sub-ns1-123") + ns := GetNamespaceFromSubName("ff-sub-ns1-03071072-079b-4047-b192-a07186fc9db8") assert.Equal(t, "ns1", ns) + ns = GetNamespaceFromSubName("ff-sub-03071072-079b-4047-b192-a07186fc9db8") + assert.Equal(t, "", ns) + + ns = GetNamespaceFromSubName("ff-sub-ns1-123") + assert.Equal(t, "", ns) + ns = GetNamespaceFromSubName("BAD") assert.Equal(t, "", ns) } @@ -319,3 +387,62 @@ func TestBadReceipt(t *testing.T) { err = HandleReceipt(context.Background(), nil, &reply, nil) assert.Error(t, err) } + +func TestErrorWrappingConflict(t *testing.T) { + ctx := context.Background() + res := &resty.Response{ + RawResponse: &http.Response{StatusCode: 409}, + } + err := WrapRESTError(ctx, nil, res, fmt.Errorf("pop"), coremsgs.MsgEthConnectorRESTErr) + assert.Regexp(t, "FF10458", err) + assert.Regexp(t, "pop", err) + + conflictInterface, conforms := err.(operations.ConflictError) + assert.True(t, conforms) + assert.True(t, conflictInterface.IsConflictError()) +} + +func TestErrorWrappingConflictErrorInBody(t *testing.T) { + ctx := context.Background() + res := &resty.Response{ + RawResponse: &http.Response{StatusCode: 409}, + } + err := WrapRESTError(ctx, &BlockchainRESTError{Error: "snap"}, res, fmt.Errorf("pop"), coremsgs.MsgEthConnectorRESTErr) + assert.Regexp(t, "FF10458", err) + assert.Regexp(t, "snap", err) + + conflictInterface, conforms := err.(operations.ConflictError) + assert.True(t, conforms) + assert.True(t, conflictInterface.IsConflictError()) +} + +func TestErrorWrappingError(t *testing.T) { + ctx := context.Background() + err := WrapRESTError(ctx, nil, nil, fmt.Errorf("pop"), coremsgs.MsgEthConnectorRESTErr) + assert.Regexp(t, "pop", err) + + _, conforms := err.(operations.ConflictError) + assert.False(t, 
conforms) +} + +func TestErrorWrappingErrorRes(t *testing.T) { + ctx := context.Background() + + err := WrapRESTError(ctx, &BlockchainRESTError{Error: "snap"}, nil, fmt.Errorf("pop"), coremsgs.MsgEthConnectorRESTErr) + assert.Regexp(t, "snap", err) + + _, conforms := err.(operations.ConflictError) + assert.False(t, conforms) +} + +func TestErrorWrappingNonConflict(t *testing.T) { + ctx := context.Background() + res := &resty.Response{ + RawResponse: &http.Response{StatusCode: 500}, + } + err := WrapRESTError(ctx, nil, res, fmt.Errorf("pop"), coremsgs.MsgEthConnectorRESTErr) + assert.Regexp(t, "pop", err) + + _, conforms := err.(operations.ConflictError) + assert.False(t, conforms) +} diff --git a/internal/blockchain/ethereum/config.go b/internal/blockchain/ethereum/config.go index 38f08cccb9..6ffed65c32 100644 --- a/internal/blockchain/ethereum/config.go +++ b/internal/blockchain/ethereum/config.go @@ -31,6 +31,10 @@ const ( defaultAddressResolverMethod = "GET" defaultAddressResolverResponseField = "address" + + defaultBackgroundInitialDelay = "5s" + defaultBackgroundRetryFactor = 2.0 + defaultBackgroundMaxDelay = "1m" ) const ( @@ -51,6 +55,14 @@ const ( EthconnectConfigInstanceDeprecated = "instance" // EthconnectConfigFromBlockDeprecated is the configuration of the first block to listen to when creating the listener for the FireFly contract EthconnectConfigFromBlockDeprecated = "fromBlock" + // EthconnectBackgroundStart is used to not fail the ethereum plugin on init and retry to start it in the background + EthconnectBackgroundStart = "backgroundStart.enabled" + // EthconnectBackgroundStartInitialDelay is delay between restarts in the case where we retry to restart in the ethereum plugin + EthconnectBackgroundStartInitialDelay = "backgroundStart.initialDelay" + // EthconnectBackgroundStartMaxDelay is the max delay between restarts in the case where we retry to restart in the ethereum plugin + EthconnectBackgroundStartMaxDelay = "backgroundStart.maxDelay" + // EthconnectBackgroundStartFactor is to set the factor by which the delay increases when retrying + EthconnectBackgroundStartFactor = "backgroundStart.factor" // AddressResolverConfigKey is a sub-key in the config to contain an address resolver config. 
AddressResolverConfigKey = "addressResolver" @@ -76,6 +88,10 @@ func (e *Ethereum) InitConfig(config config.Section) { wsclient.InitConfig(e.ethconnectConf) e.ethconnectConf.AddKnownKey(EthconnectConfigTopic) + e.ethconnectConf.AddKnownKey(EthconnectBackgroundStart) + e.ethconnectConf.AddKnownKey(EthconnectBackgroundStartInitialDelay, defaultBackgroundInitialDelay) + e.ethconnectConf.AddKnownKey(EthconnectBackgroundStartFactor, defaultBackgroundRetryFactor) + e.ethconnectConf.AddKnownKey(EthconnectBackgroundStartMaxDelay, defaultBackgroundMaxDelay) e.ethconnectConf.AddKnownKey(EthconnectConfigBatchSize, defaultBatchSize) e.ethconnectConf.AddKnownKey(EthconnectConfigBatchTimeout, defaultBatchTimeout) e.ethconnectConf.AddKnownKey(EthconnectPrefixShort, defaultPrefixShort) diff --git a/internal/blockchain/ethereum/ethereum.go b/internal/blockchain/ethereum/ethereum.go index 3a94388ff8..7cca630fd2 100644 --- a/internal/blockchain/ethereum/ethereum.go +++ b/internal/blockchain/ethereum/ethereum.go @@ -31,6 +31,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly-common/pkg/log" + "github.com/hyperledger/firefly-common/pkg/retry" "github.com/hyperledger/firefly-common/pkg/wsclient" "github.com/hyperledger/firefly-signer/pkg/abi" "github.com/hyperledger/firefly-signer/pkg/ffi2abi" @@ -41,6 +42,7 @@ import ( "github.com/hyperledger/firefly/internal/metrics" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" + "github.com/sirupsen/logrus" ) const ( @@ -75,6 +77,8 @@ type Ethereum struct { ethconnectConf config.Section subs common.FireflySubscriptions cache cache.CInterface + backgroundRetry *retry.Retry + backgroundStart bool } type eventStreamWebsocket struct { @@ -92,10 +96,6 @@ type ethWSCommandPayload struct { Message string `json:"message,omitempty"` } -type ethError struct { - Error string `json:"error,omitempty"` -} - type Location struct { Address string `json:"address"` } @@ -161,7 +161,12 @@ func (e *Ethereum) Init(ctx context.Context, cancelCtx context.CancelFunc, conf if ethconnectConf.GetString(ffresty.HTTPConfigURL) == "" { return i18n.NewError(ctx, coremsgs.MsgMissingPluginConfig, "url", ethconnectConf) } - e.client, err = ffresty.New(e.ctx, ethconnectConf) + + wsConfig, err := wsclient.GenerateConfig(ctx, ethconnectConf) + if err == nil { + e.client, err = ffresty.New(e.ctx, ethconnectConf) + } + if err != nil { return err } @@ -173,11 +178,6 @@ func (e *Ethereum) Init(ctx context.Context, cancelCtx context.CancelFunc, conf e.prefixShort = ethconnectConf.GetString(EthconnectPrefixShort) e.prefixLong = ethconnectConf.GetString(EthconnectPrefixLong) - wsConfig, err := wsclient.GenerateConfig(ctx, ethconnectConf) - if err != nil { - return err - } - if wsConfig.WSKeyPath == "" { wsConfig.WSKeyPath = "/ws" } @@ -199,13 +199,24 @@ func (e *Ethereum) Init(ctx context.Context, cancelCtx context.CancelFunc, conf } e.cache = cache - e.streams = newStreamManager(e.client, e.cache) - batchSize := ethconnectConf.GetUint(EthconnectConfigBatchSize) - batchTimeout := uint(ethconnectConf.GetDuration(EthconnectConfigBatchTimeout).Milliseconds()) - stream, err := e.streams.ensureEventStream(e.ctx, e.topic, batchSize, batchTimeout) + e.streams = newStreamManager(e.client, e.cache, e.ethconnectConf.GetUint(EthconnectConfigBatchSize), uint(e.ethconnectConf.GetDuration(EthconnectConfigBatchTimeout).Milliseconds())) + + e.backgroundStart = 
e.ethconnectConf.GetBool(EthconnectBackgroundStart) + if e.backgroundStart { + e.backgroundRetry = &retry.Retry{ + InitialDelay: e.ethconnectConf.GetDuration(EthconnectBackgroundStartInitialDelay), + MaximumDelay: e.ethconnectConf.GetDuration(EthconnectBackgroundStartMaxDelay), + Factor: e.ethconnectConf.GetFloat64(EthconnectBackgroundStartFactor), + } + + return nil + } + + stream, err := e.streams.ensureEventStream(e.ctx, e.topic) if err != nil { return err } + e.streamID = stream.ID log.L(e.ctx).Infof("Event stream: %s (topic=%s)", e.streamID, e.topic) @@ -223,7 +234,33 @@ func (e *Ethereum) SetOperationHandler(namespace string, handler core.OperationC e.callbacks.SetOperationalHandler(namespace, handler) } +func (e *Ethereum) startBackgroundLoop() { + _ = e.backgroundRetry.Do(e.ctx, fmt.Sprintf("ethereum connector %s", e.Name()), func(attempt int) (retry bool, err error) { + stream, err := e.streams.ensureEventStream(e.ctx, e.topic) + if err != nil { + return true, err + } + + e.streamID = stream.ID + log.L(e.ctx).Infof("Event stream: %s (topic=%s)", e.streamID, e.topic) + err = e.wsconn.Connect() + if err != nil { + return true, err + } + + e.closed = make(chan struct{}) + go e.eventLoop() + + return false, nil + }) +} + func (e *Ethereum) Start() (err error) { + if e.backgroundStart { + go e.startBackgroundLoop() + return nil + } + return e.wsconn.Connect() } @@ -316,10 +353,10 @@ func (e *Ethereum) parseBlockchainEvent(ctx context.Context, msgJSON fftypes.JSO } } -func (e *Ethereum) handleBatchPinEvent(ctx context.Context, location *fftypes.JSONAny, subInfo *common.SubscriptionInfo, msgJSON fftypes.JSONObject) (err error) { +func (e *Ethereum) processBatchPinEvent(ctx context.Context, events common.EventsToDispatch, location *fftypes.JSONAny, subInfo *common.SubscriptionInfo, msgJSON fftypes.JSONObject) { event := e.parseBlockchainEvent(ctx, msgJSON) if event == nil { - return nil // move on + return // move on } authorAddress := event.Output.GetString("author") @@ -338,41 +375,45 @@ func (e *Ethereum) handleBatchPinEvent(ctx context.Context, location *fftypes.JS // Validate the ethereum address - it must already be a valid address, we do not // engage the address resolve on this blockchain-driven path. 
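With backgroundStart.enabled set, the plugin no longer fails Init or Start when the connector is unreachable: Start returns immediately and startBackgroundLoop keeps retrying event stream setup with an exponential backoff built from the initialDelay, factor and maxDelay settings added to the ethconnect config section above (defaults 5s, 2.0 and 1m). A rough, self-contained sketch of that pattern, written as a generic backoff loop rather than the firefly-common retry package itself:

package backgroundsketch

import (
	"context"
	"log"
	"time"
)

// startInBackground retries setup until it succeeds or the context is
// cancelled, growing the delay by factor up to maxDelay between attempts.
func startInBackground(ctx context.Context, setup func() error, initialDelay, maxDelay time.Duration, factor float64) {
	delay := initialDelay
	for {
		err := setup()
		if err == nil {
			return // event stream created and websocket connected
		}
		log.Printf("background start failed, retrying in %s: %s", delay, err)
		select {
		case <-ctx.Done():
			return
		case <-time.After(delay):
		}
		delay = time.Duration(float64(delay) * factor)
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}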
- authorAddress, err = formatEthAddress(ctx, authorAddress) + authorAddress, err := formatEthAddress(ctx, authorAddress) if err != nil { log.L(ctx).Errorf("BatchPin event is not valid - bad from address (%s): %+v", err, msgJSON) - return nil // move on + return // move on } verifier := &core.VerifierRef{ Type: core.VerifierTypeEthAddress, Value: authorAddress, } - return e.callbacks.BatchPinOrNetworkAction(ctx, subInfo, location, event, verifier, params) + e.callbacks.PrepareBatchPinOrNetworkAction(ctx, events, subInfo, location, event, verifier, params) } -func (e *Ethereum) handleContractEvent(ctx context.Context, msgJSON fftypes.JSONObject) (err error) { - subName, err := e.streams.getSubscriptionName(ctx, msgJSON.GetString("subId")) +func (e *Ethereum) processContractEvent(ctx context.Context, events common.EventsToDispatch, msgJSON fftypes.JSONObject) error { + subID := msgJSON.GetString("subId") + subName, err := e.streams.getSubscriptionName(ctx, subID) if err != nil { - return err + return err // this is a problem - we should be able to find the listener that dispatched this to us } namespace := common.GetNamespaceFromSubName(subName) event := e.parseBlockchainEvent(ctx, msgJSON) if event != nil { - err = e.callbacks.BlockchainEvent(ctx, namespace, &blockchain.EventWithSubscription{ - Event: *event, - Subscription: msgJSON.GetString("subId"), + e.callbacks.PrepareBlockchainEvent(ctx, events, namespace, &blockchain.EventForListener{ + Event: event, + ListenerID: subID, }) } - return err + return nil } func (e *Ethereum) buildEventLocationString(msgJSON fftypes.JSONObject) string { return fmt.Sprintf("address=%s", msgJSON.GetString("address")) } -func (e *Ethereum) handleMessageBatch(ctx context.Context, messages []interface{}) error { +func (e *Ethereum) handleMessageBatch(ctx context.Context, batchID int64, messages []interface{}) error { + // Build the set of events that need handling + events := make(common.EventsToDispatch) + count := len(messages) for i, msgI := range messages { msgMap, ok := msgI.(map[string]interface{}) if !ok { @@ -381,12 +422,10 @@ func (e *Ethereum) handleMessageBatch(ctx context.Context, messages []interface{ } msgJSON := fftypes.JSONObject(msgMap) - logger := log.L(ctx).WithField("ethmsgidx", i) - eventCtx, done := context.WithCancel(log.WithLogger(ctx, logger)) - signature := msgJSON.GetString("signature") sub := msgJSON.GetString("subId") - logger.Infof("Received '%s' message on '%s'", signature, sub) + logger := log.L(ctx) + logger.Infof("[EVM:%d:%d/%d]: '%s' on '%s'", batchID, i+1, count, signature, sub) logger.Tracef("Message: %+v", msgJSON) // Matches one of the active FireFly BatchPin subscriptions @@ -395,7 +434,6 @@ func (e *Ethereum) handleMessageBatch(ctx context.Context, messages []interface{ Address: msgJSON.GetString("address"), }) if err != nil { - done() return err } @@ -405,25 +443,21 @@ func (e *Ethereum) handleMessageBatch(ctx context.Context, messages []interface{ } switch signature { case broadcastBatchEventSignature: - if err := e.handleBatchPinEvent(eventCtx, location, subInfo, msgJSON); err != nil { - done() - return err - } + e.processBatchPinEvent(ctx, events, location, subInfo, msgJSON) default: log.L(ctx).Infof("Ignoring event with unknown signature: %s", signature) } } else { // Subscription not recognized - assume it's from a custom contract listener // (event manager will reject it if it's not) - if err := e.handleContractEvent(eventCtx, msgJSON); err != nil { - done() + if err := e.processContractEvent(ctx, events, 
msgJSON); err != nil { return err } } - done() } - - return nil + // Dispatch all the events from this patch that were successfully parsed and routed to namespaces + // (could be zero - that's ok) + return e.callbacks.DispatchBlockchainEvents(ctx, events) } func (e *Ethereum) eventLoop() { @@ -451,7 +485,7 @@ func (e *Ethereum) eventLoop() { } switch msgTyped := msgParsed.(type) { case []interface{}: - err = e.handleMessageBatch(ctx, msgTyped) + err = e.handleMessageBatch(ctx, 0, msgTyped) if err == nil { ack, _ := json.Marshal(ðWSCommandPayload{ Type: "ack", @@ -465,7 +499,7 @@ func (e *Ethereum) eventLoop() { if events, ok := msgTyped["events"].([]interface{}); ok { // FFTM delivery with a batch number to use in the ack isBatch = true - err = e.handleMessageBatch(ctx, events) + err = e.handleMessageBatch(ctx, (int64)(batchNumber), events) // Errors processing messages are converted into nacks ackOrNack := ðWSCommandPayload{ Topic: e.topic, @@ -531,13 +565,6 @@ func (e *Ethereum) ResolveSigningKey(ctx context.Context, key string, intent blo return resolved, err } -func wrapError(ctx context.Context, errRes *ethError, res *resty.Response, err error) error { - if errRes != nil && errRes.Error != "" { - return i18n.WrapError(ctx, err, coremsgs.MsgEthConnectorRESTErr, errRes.Error) - } - return ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgEthConnectorRESTErr) -} - func (e *Ethereum) buildEthconnectRequestBody(ctx context.Context, messageType, address, signingKey string, abi *abi.Entry, requestID string, input []interface{}, errors []*abi.Entry, options map[string]interface{}) (map[string]interface{}, error) { headers := EthconnectMessageHeaders{ Type: messageType, @@ -557,7 +584,15 @@ func (e *Ethereum) buildEthconnectRequestBody(ctx context.Context, messageType, if len(errors) > 0 { body["errors"] = errors } - return e.applyOptions(ctx, body, options) + finalBody, err := e.applyOptions(ctx, body, options) + if err != nil { + return nil, err + } + if logrus.IsLevelEnabled(logrus.DebugLevel) { + jsonBody, _ := json.Marshal(finalBody) + log.L(ctx).Debugf("EVMConnectorBody: %s", string(jsonBody)) + } + return finalBody, nil } func (e *Ethereum) applyOptions(ctx context.Context, body, options map[string]interface{}) (map[string]interface{}, error) { @@ -572,25 +607,25 @@ func (e *Ethereum) applyOptions(ctx context.Context, body, options map[string]in return body, nil } -func (e *Ethereum) invokeContractMethod(ctx context.Context, address, signingKey string, abi *abi.Entry, requestID string, input []interface{}, errors []*abi.Entry, options map[string]interface{}) error { +func (e *Ethereum) invokeContractMethod(ctx context.Context, address, signingKey string, abi *abi.Entry, requestID string, input []interface{}, errors []*abi.Entry, options map[string]interface{}) (submissionRejected bool, err error) { if e.metrics.IsMetricsEnabled() { e.metrics.BlockchainTransaction(address, abi.Name) } messageType := "SendTransaction" body, err := e.buildEthconnectRequestBody(ctx, messageType, address, signingKey, abi, requestID, input, errors, options) if err != nil { - return err + return true, err } - var resErr ethError + var resErr common.BlockchainRESTError res, err := e.client.R(). SetContext(ctx). SetBody(body). SetError(&resErr). 
Post("/") if err != nil || !res.IsSuccess() { - return wrapError(ctx, &resErr, res, err) + return resErr.SubmissionRejected, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgEthConnectorRESTErr) } - return nil + return false, nil } func (e *Ethereum) queryContractMethod(ctx context.Context, address, signingKey string, abi *abi.Entry, input []interface{}, errors []*abi.Entry, options map[string]interface{}) (*resty.Response, error) { @@ -602,14 +637,14 @@ func (e *Ethereum) queryContractMethod(ctx context.Context, address, signingKey if err != nil { return nil, err } - var resErr ethError + var resErr common.BlockchainRESTError res, err := e.client.R(). SetContext(ctx). SetBody(body). SetError(&resErr). Post("/") if err != nil || !res.IsSuccess() { - return res, wrapError(ctx, &resErr, res, err) + return res, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgEthConnectorRESTErr) } return res, nil } @@ -662,7 +697,8 @@ func (e *Ethereum) SubmitBatchPin(ctx context.Context, nsOpID, networkNamespace, method, input := e.buildBatchPinInput(ctx, version, networkNamespace, batch) var emptyErrors []*abi.Entry - return e.invokeContractMethod(ctx, ethLocation.Address, signingKey, method, nsOpID, input, emptyErrors, nil) + _, err = e.invokeContractMethod(ctx, ethLocation.Address, signingKey, method, nsOpID, input, emptyErrors, nil) + return err } func (e *Ethereum) SubmitNetworkAction(ctx context.Context, nsOpID string, signingKey string, action core.NetworkActionType, location *fftypes.JSONAny) error { @@ -696,10 +732,11 @@ func (e *Ethereum) SubmitNetworkAction(ctx context.Context, nsOpID string, signi } } var emptyErrors []*abi.Entry - return e.invokeContractMethod(ctx, ethLocation.Address, signingKey, method, nsOpID, input, emptyErrors, nil) + _, err = e.invokeContractMethod(ctx, ethLocation.Address, signingKey, method, nsOpID, input, emptyErrors, nil) + return err } -func (e *Ethereum) DeployContract(ctx context.Context, nsOpID, signingKey string, definition, contract *fftypes.JSONAny, input []interface{}, options map[string]interface{}) error { +func (e *Ethereum) DeployContract(ctx context.Context, nsOpID, signingKey string, definition, contract *fftypes.JSONAny, input []interface{}, options map[string]interface{}) (submissionRejected bool, err error) { if e.metrics.IsMetricsEnabled() { e.metrics.BlockchainContractDeployment() } @@ -717,12 +754,12 @@ func (e *Ethereum) DeployContract(ctx context.Context, nsOpID, signingKey string if signingKey != "" { body["from"] = signingKey } - body, err := e.applyOptions(ctx, body, options) + body, err = e.applyOptions(ctx, body, options) if err != nil { - return err + return true, err } - var resErr ethError + var resErr common.BlockchainRESTError res, err := e.client.R(). SetContext(ctx). SetBody(body). @@ -732,11 +769,11 @@ func (e *Ethereum) DeployContract(ctx context.Context, nsOpID, signingKey string if strings.Contains(string(res.Body()), "FFEC100130") { // This error is returned by ethconnect because it does not support deploying contracts with this syntax // Return a more helpful and clear error message - return i18n.NewError(ctx, coremsgs.MsgNotSupportedByBlockchainPlugin) + return true, i18n.NewError(ctx, coremsgs.MsgNotSupportedByBlockchainPlugin) } - return wrapError(ctx, &resErr, res, err) + return resErr.SubmissionRejected, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgEthConnectorRESTErr) } - return nil + return false, nil } // Check if a method supports passing extra data via conformance to ERC5750. 
@@ -751,27 +788,27 @@ func (e *Ethereum) checkDataSupport(ctx context.Context, method *abi.Entry) erro return i18n.NewError(ctx, coremsgs.MsgMethodDoesNotSupportPinning) } -func (e *Ethereum) ValidateInvokeRequest(ctx context.Context, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, hasMessage bool) error { - abi, _, _, err := e.prepareRequest(ctx, method, errors, input) +func (e *Ethereum) ValidateInvokeRequest(ctx context.Context, parsedMethod interface{}, input map[string]interface{}, hasMessage bool) error { + methodInfo, _, err := e.prepareRequest(ctx, parsedMethod, input) if err == nil && hasMessage { - if err = e.checkDataSupport(ctx, abi); err != nil { + if err = e.checkDataSupport(ctx, methodInfo.methodABI); err != nil { return err } } return err } -func (e *Ethereum) InvokeContract(ctx context.Context, nsOpID string, signingKey string, location *fftypes.JSONAny, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, options map[string]interface{}, batch *blockchain.BatchPin) error { +func (e *Ethereum) InvokeContract(ctx context.Context, nsOpID string, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}, batch *blockchain.BatchPin) (bool, error) { ethereumLocation, err := e.parseContractLocation(ctx, location) if err != nil { - return err + return true, err } - abi, errorsAbi, orderedInput, err := e.prepareRequest(ctx, method, errors, input) + methodInfo, orderedInput, err := e.prepareRequest(ctx, parsedMethod, input) if err != nil { - return err + return true, err } if batch != nil { - err := e.checkDataSupport(ctx, abi) + err := e.checkDataSupport(ctx, methodInfo.methodABI) if err == nil { method, batchPin := e.buildBatchPinInput(ctx, 2, "", batch) encoded, err := method.Inputs.EncodeABIDataValuesCtx(ctx, batchPin) @@ -780,30 +817,31 @@ func (e *Ethereum) InvokeContract(ctx context.Context, nsOpID string, signingKey } } if err != nil { - return err + return true, err } } - return e.invokeContractMethod(ctx, ethereumLocation.Address, signingKey, abi, nsOpID, orderedInput, errorsAbi, options) + return e.invokeContractMethod(ctx, ethereumLocation.Address, signingKey, methodInfo.methodABI, nsOpID, orderedInput, methodInfo.errorsABI, options) } -func (e *Ethereum) QueryContract(ctx context.Context, signingKey string, location *fftypes.JSONAny, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, options map[string]interface{}) (interface{}, error) { +func (e *Ethereum) QueryContract(ctx context.Context, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}) (interface{}, error) { ethereumLocation, err := e.parseContractLocation(ctx, location) if err != nil { return nil, err } - abi, errorsAbi, orderedInput, err := e.prepareRequest(ctx, method, errors, input) + methodInfo, orderedInput, err := e.prepareRequest(ctx, parsedMethod, input) if err != nil { return nil, err } - res, err := e.queryContractMethod(ctx, ethereumLocation.Address, signingKey, abi, orderedInput, errorsAbi, options) + res, err := e.queryContractMethod(ctx, ethereumLocation.Address, signingKey, methodInfo.methodABI, orderedInput, methodInfo.errorsABI, options) if err != nil || !res.IsSuccess() { return nil, err } - output := &queryOutput{} - if err = json.Unmarshal(res.Body(), output); err != nil { + + var output interface{} + if err = json.Unmarshal(res.Body(), 
&output); err != nil { return nil, err } - return output, nil + return output, nil // note UNLIKE fabric this is just `output`, not `output.Result` - but either way the top level of what we return to the end user, is whatever the Connector sent us } func (e *Ethereum) NormalizeContractLocation(ctx context.Context, ntype blockchain.NormalizeType, location *fftypes.JSONAny) (result *fftypes.JSONAny, err error) { @@ -905,23 +943,41 @@ func (e *Ethereum) GenerateErrorSignature(ctx context.Context, errorDef *fftypes return ffi2abi.ABIMethodToSignature(abi) } -func (e *Ethereum) prepareRequest(ctx context.Context, method *fftypes.FFIMethod, errors []*fftypes.FFIError, input map[string]interface{}) (*abi.Entry, []*abi.Entry, []interface{}, error) { - errorsAbi := make([]*abi.Entry, len(errors)) - orderedInput := make([]interface{}, len(method.Params)) - abi, err := ffi2abi.ConvertFFIMethodToABI(ctx, method) +type parsedFFIMethod struct { + methodABI *abi.Entry + errorsABI []*abi.Entry +} + +func (e *Ethereum) ParseInterface(ctx context.Context, method *fftypes.FFIMethod, errors []*fftypes.FFIError) (interface{}, error) { + methodABI, err := ffi2abi.ConvertFFIMethodToABI(ctx, method) if err != nil { - return abi, errorsAbi, orderedInput, err + return nil, err + } + methodInfo := &parsedFFIMethod{ + methodABI: methodABI, + errorsABI: make([]*abi.Entry, len(errors)), } for i, ffiError := range errors { - abi, err := ffi2abi.ConvertFFIErrorDefinitionToABI(ctx, &ffiError.FFIErrorDefinition) - if err == nil { - errorsAbi[i] = abi + errorABI, err := ffi2abi.ConvertFFIErrorDefinitionToABI(ctx, &ffiError.FFIErrorDefinition) + if err != nil { + return nil, err } + methodInfo.errorsABI[i] = errorABI } - for i, ffiParam := range method.Params { - orderedInput[i] = input[ffiParam.Name] + return methodInfo, nil +} + +func (e *Ethereum) prepareRequest(ctx context.Context, parsedMethod interface{}, input map[string]interface{}) (*parsedFFIMethod, []interface{}, error) { + methodInfo, ok := parsedMethod.(*parsedFFIMethod) + if !ok || methodInfo.methodABI == nil { + return nil, nil, i18n.NewError(ctx, coremsgs.MsgUnexpectedInterfaceType, parsedMethod) } - return abi, errorsAbi, orderedInput, nil + inputs := methodInfo.methodABI.Inputs + orderedInput := make([]interface{}, len(inputs)) + for i, param := range inputs { + orderedInput[i] = input[param.Name] + } + return methodInfo, orderedInput, nil } func (e *Ethereum) getContractAddress(ctx context.Context, instancePath string) (string, error) { @@ -944,7 +1000,7 @@ func (e *Ethereum) GenerateFFI(ctx context.Context, generationRequest *fftypes.F if err != nil { return nil, i18n.WrapError(ctx, err, coremsgs.MsgFFIGenerationFailed, "unable to deserialize JSON as ABI") } - if len(*input.ABI) == 0 { + if input.ABI == nil || len(*input.ABI) == 0 { return nil, i18n.NewError(ctx, coremsgs.MsgFFIGenerationFailed, "ABI is empty") } return ffi2abi.ConvertABIToFFI(ctx, generationRequest.Namespace, generationRequest.Name, generationRequest.Version, generationRequest.Description, input.ABI) @@ -978,6 +1034,8 @@ func (e *Ethereum) queryNetworkVersion(ctx context.Context, address string) (ver } return 0, err } + + // Leave as queryOutput as it only has one value output := &queryOutput{} if err = json.Unmarshal(res.Body(), output); err != nil { return 0, err @@ -1029,7 +1087,7 @@ func (e *Ethereum) GetTransactionStatus(ctx context.Context, operation *core.Ope transactionRequestPath := fmt.Sprintf("/transactions/%s", txnID) client := e.client - var resErr ethError + var resErr 
common.BlockchainRESTError var statusResponse fftypes.JSONObject res, err := client.R(). SetContext(ctx). @@ -1040,7 +1098,7 @@ func (e *Ethereum) GetTransactionStatus(ctx context.Context, operation *core.Ope if res.StatusCode() == 404 { return nil, nil } - return nil, wrapError(ctx, &resErr, res, err) + return nil, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgEthConnectorRESTErr) } receiptInfo := statusResponse.GetObject("receipt") diff --git a/internal/blockchain/ethereum/ethereum_test.go b/internal/blockchain/ethereum/ethereum_test.go index 4dc5f3430a..76b7214d16 100644 --- a/internal/blockchain/ethereum/ethereum_test.go +++ b/internal/blockchain/ethereum/ethereum_test.go @@ -32,6 +32,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftls" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/log" + "github.com/hyperledger/firefly-common/pkg/retry" "github.com/hyperledger/firefly-common/pkg/wsclient" "github.com/hyperledger/firefly/internal/blockchain/common" "github.com/hyperledger/firefly/internal/cache" @@ -45,6 +46,7 @@ import ( "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" "github.com/jarcoal/httpmock" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) @@ -147,7 +149,7 @@ func newTestEthereum() (*Ethereum, func()) { } func newTestStreamManager(client *resty.Client) *streamManager { - return newStreamManager(client, cache.NewUmanagedCache(context.Background(), 100, 5*time.Minute)) + return newStreamManager(client, cache.NewUmanagedCache(context.Background(), 100, 5*time.Minute), defaultBatchSize, defaultBatchTimeout) } func mockNetworkVersion(t *testing.T, version int) func(req *http.Request) (*http.Response, error) { @@ -349,6 +351,178 @@ func TestInitAndStartWithFFTM(t *testing.T) { } +func TestBackgroundStart(t *testing.T) { + + log.SetLevel("trace") + e, cancel := newTestEthereum() + defer cancel() + + toServer, fromServer, wsURL, done := wsclient.NewTestWSServer(nil) + defer done() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + u, _ := url.Parse(wsURL) + u.Scheme = "http" + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + + resetConf(e) + utEthconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utEthconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utEthconnectConf.Set(EthconnectConfigInstanceDeprecated, "/instances/0x71C7656EC7ab88b098defB751B7401B5f6d8976F") + utEthconnectConf.Set(EthconnectConfigTopic, "topic1") + utEthconnectConf.Set(EthconnectBackgroundStart, true) + utFFTMConf.Set(ffresty.HTTPConfigURL, "http://ethc.example.com:12345") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(e.ctx, 100, 5*time.Minute), nil) + err := e.Init(e.ctx, e.cancelCtx, utConfig, e.metrics, cmi) + assert.NoError(t, err) + + assert.Equal(t, "ethereum", e.Name()) + assert.Equal(t, core.VerifierTypeEthAddress, e.VerifierType()) + + assert.NoError(t, err) + + assert.NotNil(t, e.Capabilities()) + + err = e.Start() + assert.NoError(t, err) + + assert.Eventually(t, func() bool { return httpmock.GetTotalCallCount() == 2 }, time.Second*5, time.Microsecond) + 
assert.Eventually(t, func() bool { return e.streamID == "es12345" }, time.Second*5, time.Microsecond) + + startupMessage := <-toServer + assert.Equal(t, `{"type":"listen","topic":"topic1"}`, startupMessage) + startupMessage = <-toServer + assert.Equal(t, `{"type":"listenreplies"}`, startupMessage) + fromServer <- `[]` // empty batch, will be ignored, but acked + reply := <-toServer + assert.Equal(t, `{"type":"ack","topic":"topic1"}`, reply) + + // Bad data will be ignored + fromServer <- `!json` + fromServer <- `{"not": "a reply"}` + fromServer <- `42` + +} + +func TestBackgroundStartFail(t *testing.T) { + + log.SetLevel("trace") + e, cancel := newTestEthereum() + defer cancel() + + _, _, wsURL, done := wsclient.NewTestWSServer(nil) + defer done() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + u, _ := url.Parse(wsURL) + u.Scheme = "http" + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(500, "Failed to get eventstreams")) + + resetConf(e) + utEthconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utEthconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utEthconnectConf.Set(EthconnectConfigInstanceDeprecated, "/instances/0x71C7656EC7ab88b098defB751B7401B5f6d8976F") + utEthconnectConf.Set(EthconnectConfigTopic, "topic1") + utEthconnectConf.Set(EthconnectBackgroundStart, true) + utFFTMConf.Set(ffresty.HTTPConfigURL, "http://ethc.example.com:12345") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(e.ctx, 100, 5*time.Minute), nil) + err := e.Init(e.ctx, e.cancelCtx, utConfig, e.metrics, cmi) + assert.NoError(t, err) + + assert.Equal(t, "ethereum", e.Name()) + assert.Equal(t, core.VerifierTypeEthAddress, e.VerifierType()) + + assert.NoError(t, err) + + err = e.Start() + assert.NoError(t, err) + + capturedErr := make(chan error) + e.backgroundRetry = &retry.Retry{ + ErrCallback: func(err error) { + capturedErr <- err + }, + } + + err = e.Start() + assert.NoError(t, err) + + err = <-capturedErr + assert.Regexp(t, "FF10111", err) +} + +func TestBackgroundStartWSFail(t *testing.T) { + + log.SetLevel("trace") + e, cancel := newTestEthereum() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + u, _ := url.Parse("http://localhost:12345") + u.Scheme = "http" + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + + resetConf(e) + utEthconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utEthconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utEthconnectConf.Set(EthconnectConfigInstanceDeprecated, "/instances/0x71C7656EC7ab88b098defB751B7401B5f6d8976F") + utEthconnectConf.Set(EthconnectConfigTopic, "topic1") + utEthconnectConf.Set(EthconnectBackgroundStart, true) + utEthconnectConf.Set(wsclient.WSConfigKeyInitialConnectAttempts, 1) + utFFTMConf.Set(ffresty.HTTPConfigURL, "http://ethc.example.com:12345") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(e.ctx, 100, 5*time.Minute), nil) + originalContext := e.ctx + err := e.Init(e.ctx, e.cancelCtx, utConfig, &metricsmocks.Manager{}, cmi) + 
cmi.AssertCalled(t, "GetCache", cache.NewCacheConfig( + originalContext, + coreconfig.CacheBlockchainLimit, + coreconfig.CacheBlockchainTTL, + "", + )) + assert.NoError(t, err) + + capturedErr := make(chan error) + e.backgroundRetry = &retry.Retry{ + ErrCallback: func(err error) { + capturedErr <- err + }, + } + + err = e.Start() + assert.NoError(t, err) + + err = <-capturedErr + assert.Regexp(t, "FF00148", err) +} + func TestWSInitFail(t *testing.T) { e, cancel := newTestEthereum() @@ -564,7 +738,7 @@ func TestInitNetworkVersionNotFound(t *testing.T) { httpmock.RegisterResponder("POST", "http://localhost:12345/subscriptions", httpmock.NewJsonResponderOrPanic(200, subscription{})) httpmock.RegisterResponder("POST", "http://localhost:12345/", - httpmock.NewJsonResponderOrPanic(500, ethError{Error: "FFEC100148"})) + httpmock.NewJsonResponderOrPanic(500, common.BlockchainRESTError{Error: "FFEC100148"})) resetConf(e) utEthconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") @@ -1262,25 +1436,27 @@ func TestHandleMessageBatchPinOK(t *testing.T) { Value: "0x91d2b4381a4cd5c7c0f27565a7d4b829844c8635", } - em.On("BatchPinComplete", "ns1", mock.Anything, expectedSigningKeyRef, mock.Anything).Return(nil) + em.On("BlockchainEventBatch", mock.MatchedBy(func(events []*blockchain.EventToDispatch) bool { + return len(events) == 2 && + events[0].Type == blockchain.EventTypeBatchPinComplete && + *events[0].BatchPinComplete.SigningKey == *expectedSigningKeyRef + })).Return(nil) var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.NoError(t, err) - em.AssertExpectations(t) - - b := em.Calls[0].Arguments[1].(*blockchain.BatchPin) - assert.Equal(t, "e19af8b3-9060-4051-812d-7597d19adfb9", b.TransactionID.String()) - assert.Equal(t, "847d3bfd-0742-49ef-b65d-3fed15f5b0a6", b.BatchID.String()) - assert.Equal(t, "d71eb138d74c229a388eb0e1abc03f4c7cbb21d4fc4b839fbf0ec73e4263f6be", b.BatchHash.String()) - assert.Equal(t, "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", b.BatchPayloadRef) - assert.Equal(t, expectedSigningKeyRef, em.Calls[0].Arguments[2]) - assert.Len(t, b.Contexts, 2) - assert.Equal(t, "68e4da79f805bca5b912bcda9c63d03e6e867108dabb9b944109aea541ef522a", b.Contexts[0].String()) - assert.Equal(t, "19b82093de5ce92a01e333048e877e2374354bf846dd034864ef6ffbd6438771", b.Contexts[1].String()) + b := em.Calls[0].Arguments[0].([]*blockchain.EventToDispatch)[0].BatchPinComplete + assert.Equal(t, "e19af8b3-9060-4051-812d-7597d19adfb9", b.Batch.TransactionID.String()) + assert.Equal(t, "847d3bfd-0742-49ef-b65d-3fed15f5b0a6", b.Batch.BatchID.String()) + assert.Equal(t, "d71eb138d74c229a388eb0e1abc03f4c7cbb21d4fc4b839fbf0ec73e4263f6be", b.Batch.BatchHash.String()) + assert.Equal(t, "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", b.Batch.BatchPayloadRef) + assert.Equal(t, expectedSigningKeyRef, b.SigningKey) + assert.Len(t, b.Batch.Contexts, 2) + assert.Equal(t, "68e4da79f805bca5b912bcda9c63d03e6e867108dabb9b944109aea541ef522a", b.Batch.Contexts[0].String()) + assert.Equal(t, "19b82093de5ce92a01e333048e877e2374354bf846dd034864ef6ffbd6438771", b.Batch.Contexts[1].String()) info1 := fftypes.JSONObject{ "address": "0x1C197604587F046FD40684A8f21f4609FB811A7b", @@ -1292,9 +1468,9 @@ func TestHandleMessageBatchPinOK(t *testing.T) { "transactionIndex": "0x0", "timestamp": "1620576488", } - assert.Equal(t, info1, b.Event.Info) + assert.Equal(t, 
info1, b.Batch.Event.Info) - b2 := em.Calls[1].Arguments[1].(*blockchain.BatchPin) + b2 := em.Calls[0].Arguments[0].([]*blockchain.EventToDispatch)[1].BatchPinComplete.Batch info2 := fftypes.JSONObject{ "address": "0x1C197604587F046FD40684A8f21f4609FB811A7b", "blockNumber": "38011", @@ -1307,6 +1483,8 @@ func TestHandleMessageBatchPinOK(t *testing.T) { } assert.Equal(t, info2, b2.Event.Info) + em.AssertExpectations(t) + } func TestHandleMessageBatchPinMissingAddress(t *testing.T) { @@ -1350,14 +1528,15 @@ func TestHandleMessageBatchPinMissingAddress(t *testing.T) { var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.Regexp(t, "FF10141", err) } func TestHandleMessageBatchPinEmpty(t *testing.T) { e := &Ethereum{ - subs: common.NewFireflySubscriptions(), + subs: common.NewFireflySubscriptions(), + callbacks: common.NewBlockchainCallbacks(), } e.subs.AddSubscription( context.Background(), @@ -1375,13 +1554,14 @@ func TestHandleMessageBatchPinEmpty(t *testing.T) { } ]`), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.NoError(t, err) } func TestHandleMessageBatchMissingData(t *testing.T) { e := &Ethereum{ - subs: common.NewFireflySubscriptions(), + subs: common.NewFireflySubscriptions(), + callbacks: common.NewBlockchainCallbacks(), } e.subs.AddSubscription( context.Background(), @@ -1400,7 +1580,7 @@ func TestHandleMessageBatchMissingData(t *testing.T) { } ]`), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.NoError(t, err) } @@ -1438,13 +1618,14 @@ func TestHandleMessageBatchPinBadTransactionID(t *testing.T) { var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.NoError(t, err) } func TestHandleMessageBatchPinBadIDentity(t *testing.T) { e := &Ethereum{ - subs: common.NewFireflySubscriptions(), + subs: common.NewFireflySubscriptions(), + callbacks: common.NewBlockchainCallbacks(), } e.subs.AddSubscription( context.Background(), @@ -1475,13 +1656,13 @@ func TestHandleMessageBatchPinBadIDentity(t *testing.T) { var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.NoError(t, err) } func TestHandleMessageBatchBadJSON(t *testing.T) { e := &Ethereum{} - err := e.handleMessageBatch(context.Background(), []interface{}{10, 20}) + err := e.handleMessageBatch(context.Background(), 0, []interface{}{10, 20}) assert.NoError(t, err) } @@ -1959,6 +2140,15 @@ func TestDeleteSubscriptionNotFound(t *testing.T) { assert.NoError(t, err) } +func matchBatchWithEvent(protocolID, blockchainTXID string) interface{} { + return mock.MatchedBy(func(batch []*blockchain.EventToDispatch) bool { + return len(batch) == 1 && + batch[0].Type == blockchain.EventTypeForListener && + batch[0].ForListener.ProtocolID == protocolID && + batch[0].ForListener.BlockchainTXID == blockchainTXID + }) +} + func TestHandleMessageContractEventOldSubscription(t *testing.T) { data := fftypes.JSONAnyPtr(` [ @@ -1997,27 +2187,26 @@ 
func TestHandleMessageContractEventOldSubscription(t *testing.T) { ) e.streams = newTestStreamManager(e.client) - em.On("BlockchainEvent", mock.MatchedBy(func(e *blockchain.EventWithSubscription) bool { - assert.Equal(t, "0xc26df2bf1a733e9249372d61eb11bd8662d26c8129df76890b1beb2f6fa72628", e.BlockchainTXID) - assert.Equal(t, "000000038011/000000/000050", e.Event.ProtocolID) - return true - })).Return(nil) + em.On("BlockchainEventBatch", matchBatchWithEvent( + "000000038011/000000/000050", + "0xc26df2bf1a733e9249372d61eb11bd8662d26c8129df76890b1beb2f6fa72628", + )).Return(nil) var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.NoError(t, err) - ev := em.Calls[0].Arguments[0].(*blockchain.EventWithSubscription) - assert.Equal(t, "sub2", ev.Subscription) - assert.Equal(t, "Changed", ev.Event.Name) + ev := em.Calls[0].Arguments[0].([]*blockchain.EventToDispatch)[0] + assert.Equal(t, "sub2", ev.ForListener.ListenerID) + assert.Equal(t, "Changed", ev.ForListener.Event.Name) outputs := fftypes.JSONObject{ "from": "0x91D2B4381A4CD5C7C0F27565A7D4B829844C8635", "value": "1", } - assert.Equal(t, outputs, ev.Event.Output) + assert.Equal(t, outputs, ev.ForListener.Event.Output) info := fftypes.JSONObject{ "address": "0x1C197604587F046FD40684A8f21f4609FB811A7b", @@ -2029,7 +2218,7 @@ func TestHandleMessageContractEventOldSubscription(t *testing.T) { "transactionIndex": "0x0", "timestamp": "1640811383", } - assert.Equal(t, info, ev.Event.Info) + assert.Equal(t, info, ev.ForListener.Event.Info) em.AssertExpectations(t) } @@ -2072,12 +2261,12 @@ func TestHandleMessageContractEventErrorOldSubscription(t *testing.T) { 1, "sb-b5b97a4e-a317-4053-6400-1474650efcb5", nil, ) e.streams = newTestStreamManager(e.client) - em.On("BlockchainEvent", mock.Anything).Return(fmt.Errorf("pop")) + em.On("BlockchainEventBatch", mock.Anything).Return(fmt.Errorf("pop")) var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.EqualError(t, err, "pop") em.AssertExpectations(t) @@ -2136,26 +2325,25 @@ func TestHandleMessageContractEventWithNamespace(t *testing.T) { ) e.streams = newTestStreamManager(e.client) - em.On("BlockchainEvent", mock.MatchedBy(func(e *blockchain.EventWithSubscription) bool { - assert.Equal(t, "000000038011/000000/000050", e.Event.ProtocolID) - return true + em.On("BlockchainEventBatch", mock.MatchedBy(func(batch []*blockchain.EventToDispatch) bool { + return len(batch) == 2 })).Return(nil) var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.NoError(t, err) - ev := em.Calls[0].Arguments[0].(*blockchain.EventWithSubscription) - assert.Equal(t, "sub2", ev.Subscription) - assert.Equal(t, "Changed", ev.Event.Name) + ev := em.Calls[0].Arguments[0].([]*blockchain.EventToDispatch)[0] + assert.Equal(t, "sub2", ev.ForListener.ListenerID) + assert.Equal(t, "Changed", ev.ForListener.Event.Name) outputs := fftypes.JSONObject{ "from": "0x91D2B4381A4CD5C7C0F27565A7D4B829844C8635", "value": "1", } - assert.Equal(t, outputs, ev.Event.Output) + assert.Equal(t, outputs, ev.ForListener.Event.Output) info := fftypes.JSONObject{ 
"address": "0x1C197604587F046FD40684A8f21f4609FB811A7b", @@ -2167,7 +2355,7 @@ func TestHandleMessageContractEventWithNamespace(t *testing.T) { "transactionIndex": "0x0", "timestamp": "1640811383", } - assert.Equal(t, info, ev.Event.Info) + assert.Equal(t, info, ev.ForListener.Event.Info) em.AssertExpectations(t) } @@ -2199,7 +2387,7 @@ func TestHandleMessageContractEventNoNamespaceHandlers(t *testing.T) { httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions/sub2", httpmock.NewJsonResponderOrPanic(200, subscription{ - ID: "sub2", Stream: "es12345", Name: "ff-sub-ns1-1132312312312", + ID: "sub2", Stream: "es12345", Name: "ff-sub-ns1-58113723-0cc3-411f-aa1b-948eca83b9cd", })) e.SetHandler("ns2", em) @@ -2210,16 +2398,15 @@ func TestHandleMessageContractEventNoNamespaceHandlers(t *testing.T) { ) e.streams = newTestStreamManager(e.client) - em.On("BlockchainEvent", mock.MatchedBy(func(e *blockchain.EventWithSubscription) bool { - assert.Equal(t, "0xc26df2bf1a733e9249372d61eb11bd8662d26c8129df76890b1beb2f6fa72628", e.BlockchainTXID) - assert.Equal(t, "000000038011/000000/000050", e.Event.ProtocolID) - return true - })).Return(nil) + em.On("BlockchainEventBatch", matchBatchWithEvent( + "000000038011/000000/000050", + "0xc26df2bf1a733e9249372d61eb11bd8662d26c8129df76890b1beb2f6fa72628", + )).Return(nil) var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.NoError(t, err) assert.Equal(t, 0, len(em.Calls)) } @@ -2250,7 +2437,7 @@ func TestHandleMessageContractEventSubNameError(t *testing.T) { defer httpmock.DeactivateAndReset() httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions/sub2", - httpmock.NewJsonResponderOrPanic(500, ethError{Error: "pop"})) + httpmock.NewJsonResponderOrPanic(500, common.BlockchainRESTError{Error: "pop"})) e.callbacks = common.NewBlockchainCallbacks() e.SetHandler("ns1", em) @@ -2264,7 +2451,7 @@ func TestHandleMessageContractEventSubNameError(t *testing.T) { var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.Regexp(t, "FF10111", err) em.AssertExpectations(t) @@ -2309,12 +2496,12 @@ func TestHandleMessageContractEventError(t *testing.T) { ) e.streams = newTestStreamManager(e.client) - em.On("BlockchainEvent", mock.Anything).Return(fmt.Errorf("pop")) + em.On("BlockchainEventBatch", mock.Anything).Return(fmt.Errorf("pop")) var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.EqualError(t, err, "pop") em.AssertExpectations(t) @@ -2348,7 +2535,7 @@ func TestDeployContractOK(t *testing.T) { assert.Equal(t, body["customOption"].(string), "customValue") return httpmock.NewJsonResponderOrPanic(200, "")(req) }) - err = e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) + _, err = e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) assert.NoError(t, err) } @@ -2370,10 +2557,36 @@ func TestDeployContractFFEC100130(t *testing.T) { assert.NoError(t, 
err) httpmock.RegisterResponder("POST", `http://localhost:12345/`, func(req *http.Request) (*http.Response, error) { - return httpmock.NewJsonResponderOrPanic(500, `{"error":"FFEC100130: failure"}`)(req) + return httpmock.NewJsonResponderOrPanic(500, fftypes.JSONAnyPtr(`{"error":"FFEC100130: failure"}`))(req) }) - err = e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) + submissionRejected, err := e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) assert.Regexp(t, "FF10429", err) + assert.True(t, submissionRejected) +} + +func TestDeployContractRevert(t *testing.T) { + e, cancel := newTestEthereum() + defer cancel() + httpmock.ActivateNonDefault(e.client.GetClient()) + defer httpmock.DeactivateAndReset() + signingKey := ethHexFormatB32(fftypes.NewRandB32()) + input := []interface{}{ + float64(1), + "1000000000000000000000000", + } + options := map[string]interface{}{ + "customOption": "customValue", + } + definitionBytes, err := json.Marshal([]interface{}{}) + contractBytes, err := json.Marshal("0x123456") + assert.NoError(t, err) + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponderOrPanic(500, fftypes.JSONAnyPtr(`{"error":"FF23021: EVM reverted", "submissionRejected": true}`))(req) + }) + submissionRejected, err := e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) + assert.Regexp(t, "FF10111.*FF23021", err) + assert.True(t, submissionRejected) } func TestDeployContractInvalidOption(t *testing.T) { @@ -2404,8 +2617,9 @@ func TestDeployContractInvalidOption(t *testing.T) { assert.Equal(t, body["customOption"].(string), "customValue") return httpmock.NewJsonResponderOrPanic(400, "pop")(req) }) - err = e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) + submissionRejected, err := e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) assert.Regexp(t, "FF10398", err) + assert.True(t, submissionRejected) } func TestDeployContractError(t *testing.T) { @@ -2436,11 +2650,13 @@ func TestDeployContractError(t *testing.T) { assert.Equal(t, body["customOption"].(string), "customValue") return httpmock.NewJsonResponderOrPanic(400, "pop")(req) }) - err = e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) + submissionRejected, err := e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) assert.Regexp(t, "FF10111", err) + assert.False(t, submissionRejected) } func TestInvokeContractOK(t *testing.T) { + logrus.SetLevel(logrus.DebugLevel) e, cancel := newTestEthereum() defer cancel() httpmock.ActivateNonDefault(e.client.GetClient()) @@ -2472,7 +2688,9 @@ func TestInvokeContractOK(t *testing.T) { assert.Equal(t, body["customOption"].(string), "customValue") return httpmock.NewJsonResponderOrPanic(200, "")(req) }) - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), 
method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.NoError(t, err) } @@ -2511,7 +2729,9 @@ func TestInvokeContractWithBatchOK(t *testing.T) { return httpmock.NewJsonResponderOrPanic(200, "")(req) }) - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, nil, errors, nil, batch) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, nil, nil, batch) assert.NoError(t, err) } @@ -2527,8 +2747,11 @@ func TestInvokeContractWithBatchUnsupported(t *testing.T) { method := testFFIMethod() errors := testFFIErrors() batch := &blockchain.BatchPin{} - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, nil, errors, nil, batch) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + submissionRejected, err := e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, nil, nil, batch) assert.Regexp(t, "FF10443", err) + assert.True(t, submissionRejected) } func TestInvokeContractInvalidOption(t *testing.T) { @@ -2562,8 +2785,11 @@ func TestInvokeContractInvalidOption(t *testing.T) { assert.Equal(t, "1000000000000000000000000", params[1]) return httpmock.NewJsonResponderOrPanic(200, "")(req) }) - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + submissionRejected, err := e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.Regexp(t, "FF10398", err) + assert.True(t, submissionRejected) } func TestInvokeContractInvalidInput(t *testing.T) { @@ -2598,7 +2824,9 @@ func TestInvokeContractInvalidInput(t *testing.T) { assert.Equal(t, body["customOption"].(string), "customValue") return httpmock.NewJsonResponderOrPanic(200, "")(req) }) - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.Regexp(t, "unsupported type", err) } @@ -2616,8 +2844,11 @@ func TestInvokeContractAddressNotSet(t *testing.T) { options := map[string]interface{}{} locationBytes, err := json.Marshal(location) assert.NoError(t, err) - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + submissionRejected, err := e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.Regexp(t, "'address' not set", err) + assert.True(t, submissionRejected) } func 
TestInvokeContractEthconnectError(t *testing.T) { @@ -2642,8 +2873,40 @@ func TestInvokeContractEthconnectError(t *testing.T) { func(req *http.Request) (*http.Response, error) { return httpmock.NewJsonResponderOrPanic(400, "")(req) }) - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + submissionRejected, err := e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) + assert.Regexp(t, "FF10111", err) + assert.False(t, submissionRejected) +} + +func TestInvokeContractEVMConnectRejectErr(t *testing.T) { + e, cancel := newTestEthereum() + defer cancel() + httpmock.ActivateNonDefault(e.client.GetClient()) + defer httpmock.DeactivateAndReset() + signingKey := ethHexFormatB32(fftypes.NewRandB32()) + location := &Location{ + Address: "0x12345", + } + method := testFFIMethod() + errors := testFFIErrors() + params := map[string]interface{}{ + "x": float64(1), + "y": float64(2), + } + options := map[string]interface{}{} + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponderOrPanic(400, fftypes.JSONAnyPtr(`{"error":"FF23021: EVM reverted", "submissionRejected": true}`))(req) + }) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + submissionRejected, err := e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.Regexp(t, "FF10111", err) + assert.True(t, submissionRejected) } func TestInvokeContractPrepareFail(t *testing.T) { @@ -2655,23 +2918,57 @@ func TestInvokeContractPrepareFail(t *testing.T) { location := &Location{ Address: "0x12345", } + params := map[string]interface{}{ + "x": float64(1), + "y": float64(2), + } + options := map[string]interface{}{} + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + submissionRejected, err := e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), "wrong", params, options, nil) + assert.Regexp(t, "FF10457", err) + assert.True(t, submissionRejected) +} + +func TestParseInterfaceFailFFIMethod(t *testing.T) { + e, cancel := newTestEthereum() + defer cancel() + httpmock.ActivateNonDefault(e.client.GetClient()) + defer httpmock.DeactivateAndReset() method := &fftypes.FFIMethod{ - Name: "set", Params: fftypes.FFIParams{ { - Schema: fftypes.JSONAnyPtr("{bad schema!"), + Name: "bad", + Schema: fftypes.JSONAnyPtr("{badschema}"), }, }, } errors := testFFIErrors() - params := map[string]interface{}{ - "x": float64(1), - "y": float64(2), + _, err := e.ParseInterface(context.Background(), method, errors) + assert.Regexp(t, "invalid json", err) +} + +func TestParseInterfaceFailFFIError(t *testing.T) { + e, cancel := newTestEthereum() + defer cancel() + httpmock.ActivateNonDefault(e.client.GetClient()) + defer httpmock.DeactivateAndReset() + method := &fftypes.FFIMethod{ + Params: fftypes.FFIParams{}, } - options := map[string]interface{}{} - locationBytes, err := json.Marshal(location) - assert.NoError(t, err) - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + 
errors := []*fftypes.FFIError{ + { + FFIErrorDefinition: fftypes.FFIErrorDefinition{ + Params: fftypes.FFIParams{ + { + Name: "bad", + Schema: fftypes.JSONAnyPtr("{badschema}"), + }, + }, + }, + }, + } + _, err := e.ParseInterface(context.Background(), method, errors) assert.Regexp(t, "invalid json", err) } @@ -2702,13 +2999,116 @@ func TestQueryContractOK(t *testing.T) { assert.Equal(t, "0x01020304", body["from"].(string)) return httpmock.NewJsonResponderOrPanic(200, queryOutput{Output: "3"})(req) }) - result, err := e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + result, err := e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) assert.NoError(t, err) j, err := json.Marshal(result) assert.NoError(t, err) assert.Equal(t, `{"output":"3"}`, string(j)) } +func TestQueryContractMultipleUnnamedOutputOK(t *testing.T) { + e, cancel := newTestEthereum() + defer cancel() + httpmock.ActivateNonDefault(e.client.GetClient()) + defer httpmock.DeactivateAndReset() + location := &Location{ + Address: "0x12345", + } + method := testFFIMethod() + errors := testFFIErrors() + params := map[string]interface{}{} + options := map[string]interface{}{ + "customOption": "customValue", + } + + outputStruct := struct { + Test string `json:"test"` + Value int `json:"value"` + }{ + Test: "myvalue", + Value: 3, + } + + output := map[string]interface{}{ + "output": "foo", + "output1": outputStruct, + "anything": 3, + } + + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + var body map[string]interface{} + json.NewDecoder(req.Body).Decode(&body) + headers := body["headers"].(map[string]interface{}) + assert.Equal(t, "Query", headers["type"]) + assert.Equal(t, "customValue", body["customOption"].(string)) + assert.Equal(t, "0x12345", body["to"].(string)) + assert.Equal(t, "0x01020304", body["from"].(string)) + return httpmock.NewJsonResponderOrPanic(200, output)(req) + }) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + result, err := e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) + assert.NoError(t, err) + j, err := json.Marshal(result) + assert.NoError(t, err) + assert.Equal(t, `{"anything":3,"output":"foo","output1":{"test":"myvalue","value":3}}`, string(j)) +} + +func TestQueryContractNamedOutputOK(t *testing.T) { + e, cancel := newTestEthereum() + defer cancel() + httpmock.ActivateNonDefault(e.client.GetClient()) + defer httpmock.DeactivateAndReset() + location := &Location{ + Address: "0x12345", + } + method := testFFIMethod() + errors := testFFIErrors() + params := map[string]interface{}{} + options := map[string]interface{}{ + "customOption": "customValue", + } + + outputStruct := struct { + Test string `json:"test"` + Value int `json:"value"` + }{ + Test: "myvalue", + Value: 3, + } + + output := map[string]interface{}{ + "mynamedparam": "foo", + "mynamedstruct": outputStruct, + } + + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + var body map[string]interface{} 
+ json.NewDecoder(req.Body).Decode(&body) + headers := body["headers"].(map[string]interface{}) + assert.Equal(t, "Query", headers["type"]) + assert.Equal(t, "customValue", body["customOption"].(string)) + assert.Equal(t, "0x12345", body["to"].(string)) + assert.Equal(t, "0x01020304", body["from"].(string)) + return httpmock.NewJsonResponderOrPanic(200, output)(req) + }) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + result, err := e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) + assert.NoError(t, err) + j, err := json.Marshal(result) + assert.NoError(t, err) + assert.Equal(t, `{"mynamedparam":"foo","mynamedstruct":{"test":"myvalue","value":3}}`, string(j)) +} + func TestQueryContractInvalidOption(t *testing.T) { e, cancel := newTestEthereum() defer cancel() @@ -2733,7 +3133,9 @@ func TestQueryContractInvalidOption(t *testing.T) { assert.Equal(t, "Query", headers["type"]) return httpmock.NewJsonResponderOrPanic(200, queryOutput{Output: "3"})(req) }) - _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) assert.Regexp(t, "FF10398", err) } @@ -2745,21 +3147,12 @@ func TestQueryContractErrorPrepare(t *testing.T) { location := &Location{ Address: "0x12345", } - method := &fftypes.FFIMethod{ - Params: fftypes.FFIParams{ - { - Name: "bad", - Schema: fftypes.JSONAnyPtr("{badschema}"), - }, - }, - } - errors := testFFIErrors() params := map[string]interface{}{} options := map[string]interface{}{} locationBytes, err := json.Marshal(location) assert.NoError(t, err) - _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) - assert.Regexp(t, "invalid json", err) + _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), "wrong type", params, options) + assert.Regexp(t, "FF10457", err) } func TestQueryContractAddressNotSet(t *testing.T) { @@ -2775,7 +3168,9 @@ func TestQueryContractAddressNotSet(t *testing.T) { options := map[string]interface{}{} locationBytes, err := json.Marshal(location) assert.NoError(t, err) - _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) assert.Regexp(t, "'address' not set", err) } @@ -2800,7 +3195,9 @@ func TestQueryContractEthconnectError(t *testing.T) { func(req *http.Request) (*http.Response, error) { return httpmock.NewJsonResponderOrPanic(400, queryOutput{})(req) }) - _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) assert.Regexp(t, "FF10111", err) } @@ -2829,7 +3226,9 @@ 
func TestQueryContractUnmarshalResponseError(t *testing.T) { assert.Equal(t, "Query", headers["type"]) return httpmock.NewStringResponder(200, "[definitely not JSON}")(req) }) - _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.QueryContract(context.Background(), "0x01020304", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) assert.Regexp(t, "invalid character", err) } @@ -3156,7 +3555,7 @@ func TestSubmitNetworkActionVersionFail(t *testing.T) { httpmock.ActivateNonDefault(e.client.GetClient()) defer httpmock.DeactivateAndReset() httpmock.RegisterResponder("POST", `http://localhost:12345/`, - httpmock.NewJsonResponderOrPanic(500, ethError{Error: "unknown"})) + httpmock.NewJsonResponderOrPanic(500, common.BlockchainRESTError{Error: "unknown"})) location := fftypes.JSONAnyPtr(fftypes.JSONObject{ "address": "0x123", @@ -3166,6 +3565,15 @@ func TestSubmitNetworkActionVersionFail(t *testing.T) { assert.Regexp(t, "FF10111", err) } +func matchNetworkAction(action string, expectedSigningKey core.VerifierRef) interface{} { + return mock.MatchedBy(func(batch []*blockchain.EventToDispatch) bool { + return len(batch) == 1 && + batch[0].Type == blockchain.EventTypeNetworkAction && + batch[0].NetworkAction.Action == action && + *batch[0].NetworkAction.SigningKey == expectedSigningKey + }) +} + func TestHandleNetworkAction(t *testing.T) { data := fftypes.JSONAnyPtr(` [ @@ -3206,12 +3614,12 @@ func TestHandleNetworkAction(t *testing.T) { Value: "0x91d2b4381a4cd5c7c0f27565a7d4b829844c8635", } - em.On("BlockchainNetworkAction", "terminate", mock.AnythingOfType("*fftypes.JSONAny"), mock.AnythingOfType("*blockchain.Event"), expectedSigningKeyRef).Return(nil) + em.On("BlockchainEventBatch", matchNetworkAction("terminate", *expectedSigningKeyRef)).Return(nil) var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.NoError(t, err) em.AssertExpectations(t) @@ -3258,12 +3666,12 @@ func TestHandleNetworkActionFail(t *testing.T) { Value: "0x91d2b4381a4cd5c7c0f27565a7d4b829844c8635", } - em.On("BlockchainNetworkAction", "terminate", mock.AnythingOfType("*fftypes.JSONAny"), mock.AnythingOfType("*blockchain.Event"), expectedSigningKeyRef).Return(fmt.Errorf("pop")) + em.On("BlockchainEventBatch", matchNetworkAction("terminate", *expectedSigningKeyRef)).Return(fmt.Errorf("pop")) var events []interface{} err := json.Unmarshal(data.Bytes(), &events) assert.NoError(t, err) - err = e.handleMessageBatch(context.Background(), events) + err = e.handleMessageBatch(context.Background(), 0, events) assert.EqualError(t, err, "pop") em.AssertExpectations(t) @@ -3340,7 +3748,7 @@ func TestGetNetworkVersionMethodNotFound(t *testing.T) { }.String()) httpmock.RegisterResponder("POST", "http://localhost:12345/", - httpmock.NewJsonResponderOrPanic(500, ethError{Error: "FFEC100148"})) + httpmock.NewJsonResponderOrPanic(500, common.BlockchainRESTError{Error: "FFEC100148"})) version, err := e.GetNetworkVersion(context.Background(), location) @@ -3358,7 +3766,7 @@ func TestGetNetworkVersionQueryFail(t *testing.T) { }.String()) httpmock.RegisterResponder("POST", "http://localhost:12345/", - httpmock.NewJsonResponderOrPanic(500, ethError{Error: "pop"})) + 
httpmock.NewJsonResponderOrPanic(500, common.BlockchainRESTError{Error: "pop"})) version, err := e.GetNetworkVersion(context.Background(), location) @@ -3477,7 +3885,7 @@ func TestConvertDeprecatedContractConfigContractURLBadQuery(t *testing.T) { defer httpmock.DeactivateAndReset() httpmock.RegisterResponder("GET", "http://localhost:12345/contracts/firefly", - httpmock.NewJsonResponderOrPanic(500, ethError{Error: "FFEC100148"})) + httpmock.NewJsonResponderOrPanic(500, common.BlockchainRESTError{Error: "FFEC100148"})) resetConf(e) utEthconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") @@ -3966,9 +4374,12 @@ func TestValidateInvokeRequest(t *testing.T) { e, cancel := newTestEthereum() defer cancel() - err := e.ValidateInvokeRequest(context.Background(), testFFIMethod(), nil, nil, false) + parsedMethod, err := e.ParseInterface(context.Background(), testFFIMethod(), nil) + assert.NoError(t, err) + + err = e.ValidateInvokeRequest(context.Background(), parsedMethod, nil, false) assert.NoError(t, err) - err = e.ValidateInvokeRequest(context.Background(), testFFIMethod(), nil, nil, true) + err = e.ValidateInvokeRequest(context.Background(), parsedMethod, nil, true) assert.Regexp(t, "FF10443", err) } diff --git a/internal/blockchain/ethereum/eventstream.go b/internal/blockchain/ethereum/eventstream.go index 1715064f5b..8b1d505abc 100644 --- a/internal/blockchain/ethereum/eventstream.go +++ b/internal/blockchain/ethereum/eventstream.go @@ -34,8 +34,10 @@ import ( ) type streamManager struct { - client *resty.Client - cache cache.CInterface + client *resty.Client + cache cache.CInterface + batchSize uint + batchTimeout uint } type eventStream struct { @@ -65,10 +67,12 @@ type subscriptionCheckpoint struct { Catchup bool `json:"catchup,omitempty"` } -func newStreamManager(client *resty.Client, cache cache.CInterface) *streamManager { +func newStreamManager(client *resty.Client, cache cache.CInterface, batchSize, batchTimeout uint) *streamManager { return &streamManager{ - client: client, - cache: cache, + client: client, + cache: cache, + batchSize: batchSize, + batchTimeout: batchTimeout, } } @@ -97,8 +101,8 @@ func buildEventStream(topic string, batchSize, batchTimeout uint) *eventStream { } } -func (s *streamManager) createEventStream(ctx context.Context, topic string, batchSize, batchTimeout uint) (*eventStream, error) { - stream := buildEventStream(topic, batchSize, batchTimeout) +func (s *streamManager) createEventStream(ctx context.Context, topic string) (*eventStream, error) { + stream := buildEventStream(topic, s.batchSize, s.batchTimeout) res, err := s.client.R(). SetContext(ctx). SetBody(stream). 
@@ -123,21 +127,21 @@ func (s *streamManager) updateEventStream(ctx context.Context, topic string, bat return stream, nil } -func (s *streamManager) ensureEventStream(ctx context.Context, topic string, batchSize, batchTimeout uint) (*eventStream, error) { +func (s *streamManager) ensureEventStream(ctx context.Context, topic string) (*eventStream, error) { existingStreams, err := s.getEventStreams(ctx) if err != nil { return nil, err } for _, stream := range existingStreams { if stream.Name == topic { - stream, err = s.updateEventStream(ctx, topic, batchSize, batchTimeout, stream.ID) + stream, err = s.updateEventStream(ctx, topic, s.batchSize, s.batchTimeout, stream.ID) if err != nil { return nil, err } return stream, nil } } - return s.createEventStream(ctx, topic, batchSize, batchTimeout) + return s.createEventStream(ctx, topic) } func (s *streamManager) getSubscriptions(ctx context.Context) (subs []*subscription, err error) { diff --git a/internal/blockchain/fabric/config.go b/internal/blockchain/fabric/config.go index cf1d4588df..f2694c3949 100644 --- a/internal/blockchain/fabric/config.go +++ b/internal/blockchain/fabric/config.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -26,6 +26,10 @@ const ( defaultBatchTimeout = 500 defaultPrefixShort = "fly" defaultPrefixLong = "firefly" + + defaultBackgroundInitialDelay = "5s" + defaultBackgroundRetryFactor = 2.0 + defaultBackgroundMaxDelay = "1m" ) const ( @@ -49,6 +53,14 @@ const ( FabconnectPrefixLong = "prefixLong" // FabconnectConfigChaincodeDeprecated is the Fabric Firefly chaincode deployed to the Firefly channels FabconnectConfigChaincodeDeprecated = "chaincode" + // FabconnectBackgroundStart is used to not fail the fabric plugin on init and retry to start it in the background + FabconnectBackgroundStart = "backgroundStart.enabled" + // FabconnectBackgroundStartInitialDelay is delay between restarts in the case where we retry to restart in the fabric plugin + FabconnectBackgroundStartInitialDelay = "backgroundStart.initialDelay" + // FabconnectBackgroundStartMaxDelay is the max delay between restarts in the case where we retry to restart in the fabric plugin + FabconnectBackgroundStartMaxDelay = "backgroundStart.maxDelay" + // FabconnectBackgroundStartFactor is to set the factor by which the delay increases when retrying + FabconnectBackgroundStartFactor = "backgroundStart.factor" ) func (f *Fabric) InitConfig(config config.Section) { @@ -62,4 +74,8 @@ func (f *Fabric) InitConfig(config config.Section) { f.fabconnectConf.AddKnownKey(FabconnectConfigBatchTimeout, defaultBatchTimeout) f.fabconnectConf.AddKnownKey(FabconnectPrefixShort, defaultPrefixShort) f.fabconnectConf.AddKnownKey(FabconnectPrefixLong, defaultPrefixLong) + f.fabconnectConf.AddKnownKey(FabconnectBackgroundStart) + f.fabconnectConf.AddKnownKey(FabconnectBackgroundStartFactor, defaultBackgroundRetryFactor) + f.fabconnectConf.AddKnownKey(FabconnectBackgroundStartInitialDelay, defaultBackgroundInitialDelay) + f.fabconnectConf.AddKnownKey(FabconnectBackgroundStartMaxDelay, defaultBackgroundMaxDelay) } diff --git a/internal/blockchain/fabric/eventstream.go b/internal/blockchain/fabric/eventstream.go index ee69b2fdf6..3db62c3eea 100644 --- a/internal/blockchain/fabric/eventstream.go +++ b/internal/blockchain/fabric/eventstream.go @@ -30,9 +30,11 @@ import ( ) type streamManager struct { - client *resty.Client - signer string - cache cache.CInterface + client *resty.Client + signer string 
+ cache cache.CInterface + batchSize uint + batchTimeoutMS uint } type eventStream struct { @@ -61,11 +63,13 @@ type eventFilter struct { EventFilter string `json:"eventFilter"` } -func newStreamManager(client *resty.Client, signer string, cache cache.CInterface) *streamManager { +func newStreamManager(client *resty.Client, signer string, cache cache.CInterface, batchSize, batchTimeout uint) *streamManager { return &streamManager{ - client: client, - signer: signer, - cache: cache, + client: client, + signer: signer, + cache: cache, + batchSize: batchSize, + batchTimeoutMS: batchTimeout, } } @@ -94,8 +98,8 @@ func buildEventStream(topic string, batchSize, batchTimeout uint) *eventStream { } } -func (s *streamManager) createEventStream(ctx context.Context, topic string, batchSize, batchTimeout uint) (*eventStream, error) { - stream := buildEventStream(topic, batchSize, batchTimeout) +func (s *streamManager) createEventStream(ctx context.Context, topic string) (*eventStream, error) { + stream := buildEventStream(topic, s.batchSize, s.batchTimeoutMS) res, err := s.client.R(). SetContext(ctx). SetBody(stream). @@ -107,7 +111,7 @@ func (s *streamManager) createEventStream(ctx context.Context, topic string, bat return stream, nil } -func (s *streamManager) ensureEventStream(ctx context.Context, topic string, batchSize, batchTimeout uint) (*eventStream, error) { +func (s *streamManager) ensureEventStream(ctx context.Context, topic string) (*eventStream, error) { existingStreams, err := s.getEventStreams(ctx) if err != nil { return nil, err @@ -117,7 +121,7 @@ func (s *streamManager) ensureEventStream(ctx context.Context, topic string, bat return stream, nil } } - return s.createEventStream(ctx, topic, batchSize, batchTimeout) + return s.createEventStream(ctx, topic) } func (s *streamManager) getSubscriptions(ctx context.Context) (subs []*subscription, err error) { diff --git a/internal/blockchain/fabric/fabric.go b/internal/blockchain/fabric/fabric.go index 1af7c82b2f..03b3fca8ef 100644 --- a/internal/blockchain/fabric/fabric.go +++ b/internal/blockchain/fabric/fabric.go @@ -31,6 +31,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly-common/pkg/log" + "github.com/hyperledger/firefly-common/pkg/retry" "github.com/hyperledger/firefly-common/pkg/wsclient" "github.com/hyperledger/firefly/internal/blockchain/common" "github.com/hyperledger/firefly/internal/cache" @@ -46,25 +47,27 @@ const ( ) type Fabric struct { - ctx context.Context - cancelCtx context.CancelFunc - topic string - defaultChannel string - signer string - prefixShort string - prefixLong string - capabilities *blockchain.Capabilities - callbacks common.BlockchainCallbacks - client *resty.Client - streams *streamManager - streamID string - idCache map[string]*fabIdentity - wsconn wsclient.WSClient - closed chan struct{} - metrics metrics.Manager - fabconnectConf config.Section - subs common.FireflySubscriptions - cache cache.CInterface + ctx context.Context + cancelCtx context.CancelFunc + topic string + defaultChannel string + signer string + prefixShort string + prefixLong string + capabilities *blockchain.Capabilities + callbacks common.BlockchainCallbacks + client *resty.Client + streams *streamManager + streamID string + idCache map[string]*fabIdentity + wsconn wsclient.WSClient + closed chan struct{} + metrics metrics.Manager + fabconnectConf config.Section + subs common.FireflySubscriptions + cache cache.CInterface + backgroundRetry 
*retry.Retry + backgroundStart bool } type eventStreamWebsocket struct { @@ -80,10 +83,6 @@ type fabTxInputHeaders struct { Chaincode string `json:"chaincode,omitempty"` } -type fabError struct { - Error string `json:"error,omitempty"` -} - type PayloadSchema struct { Type string `json:"type"` PrefixItems []*PrefixItem `json:"prefixItems"` @@ -206,7 +205,11 @@ func (f *Fabric) Init(ctx context.Context, cancelCtx context.CancelFunc, conf co return i18n.NewError(ctx, coremsgs.MsgMissingPluginConfig, "url", "blockchain.fabric.fabconnect") } - f.client, err = ffresty.New(f.ctx, fabconnectConf) + wsConfig, err := wsclient.GenerateConfig(ctx, fabconnectConf) + if err == nil { + f.client, err = ffresty.New(f.ctx, fabconnectConf) + } + if err != nil { return err } @@ -221,11 +224,6 @@ func (f *Fabric) Init(ctx context.Context, cancelCtx context.CancelFunc, conf co f.prefixShort = fabconnectConf.GetString(FabconnectPrefixShort) f.prefixLong = fabconnectConf.GetString(FabconnectPrefixLong) - wsConfig, err := wsclient.GenerateConfig(ctx, fabconnectConf) - if err != nil { - return err - } - if wsConfig.WSKeyPath == "" { wsConfig.WSKeyPath = "/ws" } @@ -247,10 +245,20 @@ func (f *Fabric) Init(ctx context.Context, cancelCtx context.CancelFunc, conf co } f.cache = cache - f.streams = newStreamManager(f.client, f.signer, f.cache) - batchSize := f.fabconnectConf.GetUint(FabconnectConfigBatchSize) - batchTimeout := uint(f.fabconnectConf.GetDuration(FabconnectConfigBatchTimeout).Milliseconds()) - stream, err := f.streams.ensureEventStream(f.ctx, f.topic, batchSize, batchTimeout) + f.streams = newStreamManager(f.client, f.signer, f.cache, f.fabconnectConf.GetUint(FabconnectConfigBatchSize), uint(f.fabconnectConf.GetDuration(FabconnectConfigBatchTimeout).Milliseconds())) + + f.backgroundStart = f.fabconnectConf.GetBool(FabconnectBackgroundStart) + + if f.backgroundStart { + f.backgroundRetry = &retry.Retry{ + InitialDelay: f.fabconnectConf.GetDuration(FabconnectBackgroundStartInitialDelay), + MaximumDelay: f.fabconnectConf.GetDuration(FabconnectBackgroundStartMaxDelay), + Factor: f.fabconnectConf.GetFloat64(FabconnectBackgroundStartFactor), + } + return nil + } + + stream, err := f.streams.ensureEventStream(f.ctx, f.topic) if err != nil { return err } @@ -271,7 +279,33 @@ func (f *Fabric) SetOperationHandler(namespace string, handler core.OperationCal f.callbacks.SetOperationalHandler(namespace, handler) } +func (f *Fabric) backgroundStartLoop() { + _ = f.backgroundRetry.Do(f.ctx, fmt.Sprintf("fabric connector %s", f.Name()), func(attempt int) (retry bool, err error) { + stream, err := f.streams.ensureEventStream(f.ctx, f.topic) + if err != nil { + return true, err + } + + f.streamID = stream.ID + log.L(f.ctx).Infof("Event stream: %s (topic=%s)", f.streamID, f.topic) + + err = f.wsconn.Connect() + if err != nil { + return true, err + } + + f.closed = make(chan struct{}) + go f.eventLoop() + + return false, nil + }) +} + func (f *Fabric) Start() (err error) { + if f.backgroundStart { + go f.backgroundStartLoop() + return nil + } return f.wsconn.Connect() } @@ -317,10 +351,15 @@ func (f *Fabric) parseBlockchainEvent(ctx context.Context, msgJSON fftypes.JSONO return nil // move on } + // Fabric events are dispatched by the underlying fabric client to FabConnect with just the block number + // and the transaction hash. The index of the transaction in the block, or the index of the action within + // the transaction are not available. 
So we cannot generate an alphanumerically sortable string + // into the protocol ID. Instead we can only do this (which is according to Fabric rules assured to be + // unique, as Fabric only allows one event per transaction): sTransactionHash := msgJSON.GetString("transactionId") blockNumber := msgJSON.GetInt64("blockNumber") - transactionIndex := msgJSON.GetInt64("transactionIndex") - eventIndex := msgJSON.GetInt64("eventIndex") + protocolID := fmt.Sprintf("%.12d/%s", blockNumber, sTransactionHash) + name := msgJSON.GetString("eventName") timestamp := msgJSON.GetInt64("timestamp") chaincode := msgJSON.GetString("chaincodeId") @@ -330,7 +369,7 @@ func (f *Fabric) parseBlockchainEvent(ctx context.Context, msgJSON fftypes.JSONO BlockchainTXID: sTransactionHash, Source: f.Name(), Name: name, - ProtocolID: fmt.Sprintf("%.12d/%.6d/%.6d", blockNumber, transactionIndex, eventIndex), + ProtocolID: protocolID, Output: *payload, Info: msgJSON, Timestamp: fftypes.UnixTime(timestamp), @@ -339,10 +378,10 @@ func (f *Fabric) parseBlockchainEvent(ctx context.Context, msgJSON fftypes.JSONO } } -func (f *Fabric) handleBatchPinEvent(ctx context.Context, location *fftypes.JSONAny, subInfo *common.SubscriptionInfo, msgJSON fftypes.JSONObject) (err error) { +func (f *Fabric) processBatchPinEvent(ctx context.Context, events common.EventsToDispatch, location *fftypes.JSONAny, subInfo *common.SubscriptionInfo, msgJSON fftypes.JSONObject) { event := f.parseBlockchainEvent(ctx, msgJSON) if event == nil { - return nil // move on + return // move on } signer := event.Output.GetString("signer") @@ -360,27 +399,28 @@ func (f *Fabric) handleBatchPinEvent(ctx context.Context, location *fftypes.JSON Value: signer, } - return f.callbacks.BatchPinOrNetworkAction(ctx, subInfo, location, event, verifier, params) + f.callbacks.PrepareBatchPinOrNetworkAction(ctx, events, subInfo, location, event, verifier, params) } func (f *Fabric) buildEventLocationString(chaincode string) string { return fmt.Sprintf("chaincode=%s", chaincode) } -func (f *Fabric) handleContractEvent(ctx context.Context, msgJSON fftypes.JSONObject) (err error) { - subName, err := f.streams.getSubscriptionName(ctx, msgJSON.GetString("subId")) +func (f *Fabric) processContractEvent(ctx context.Context, events common.EventsToDispatch, msgJSON fftypes.JSONObject) (err error) { + subID := msgJSON.GetString("subId") + subName, err := f.streams.getSubscriptionName(ctx, subID) if err != nil { - return err + return err // this is a problem - we should be able to find the listener that dispatched this to us } namespace := common.GetNamespaceFromSubName(subName) event := f.parseBlockchainEvent(ctx, msgJSON) - if event == nil { - return nil // move on + if event != nil { + f.callbacks.PrepareBlockchainEvent(ctx, events, namespace, &blockchain.EventForListener{ + Event: event, + ListenerID: subID, + }) } - return f.callbacks.BlockchainEvent(ctx, namespace, &blockchain.EventWithSubscription{ - Event: *event, - Subscription: msgJSON.GetString("subId"), - }) + return nil } func (f *Fabric) AddFireflySubscription(ctx context.Context, namespace *core.Namespace, contract *blockchain.MultipartyContract) (string, error) { @@ -422,6 +462,9 @@ func (f *Fabric) RemoveFireflySubscription(ctx context.Context, subID string) { } func (f *Fabric) handleMessageBatch(ctx context.Context, messages []interface{}) error { + // Build the set of events that need handling + events := make(common.EventsToDispatch) + count := len(messages) for i, msgI := range messages { msgMap, ok := 
msgI.(map[string]interface{}) if !ok { @@ -430,12 +473,10 @@ func (f *Fabric) handleMessageBatch(ctx context.Context, messages []interface{}) } msgJSON := fftypes.JSONObject(msgMap) - logger := log.L(ctx).WithField("fabmsgidx", i) - eventCtx, done := context.WithCancel(log.WithLogger(ctx, logger)) - eventName := msgJSON.GetString("eventName") sub := msgJSON.GetString("subId") - logger.Infof("Received '%s' message on '%s'", eventName, sub) + logger := log.L(ctx) + logger.Infof("[Fabric:%d/%d]: '%s' on '%s'", i+1, count, eventName, sub) logger.Tracef("Message: %+v", msgJSON) // Matches one of the active FireFly BatchPin subscriptions @@ -445,31 +486,26 @@ func (f *Fabric) handleMessageBatch(ctx context.Context, messages []interface{}) Channel: subInfo.Extra.(string), }) if err != nil { - done() return err } switch eventName { case broadcastBatchEventName: - if err := f.handleBatchPinEvent(eventCtx, location, subInfo, msgJSON); err != nil { - done() - return err - } + f.processBatchPinEvent(ctx, events, location, subInfo, msgJSON) default: log.L(ctx).Infof("Ignoring event with unknown name: %s", eventName) } } else { // Subscription not recognized - assume it's from a custom contract listener // (event manager will reject it if it's not) - if err := f.handleContractEvent(ctx, msgJSON); err != nil { - done() + if err := f.processContractEvent(ctx, events, msgJSON); err != nil { return err } } - done() } - - return nil + // Dispatch all the events from this patch that were successfully parsed and routed to namespaces + // (could be zero - that's ok) + return f.callbacks.DispatchBlockchainEvents(ctx, events) } func (f *Fabric) eventLoop() { @@ -562,19 +598,12 @@ func (f *Fabric) ResolveSigningKey(ctx context.Context, signingKeyInput string, return signingKeyInput, nil } -func wrapError(ctx context.Context, errRes *fabError, res *resty.Response, err error) error { - if errRes != nil && errRes.Error != "" { - return i18n.WrapError(ctx, err, coremsgs.MsgFabconnectRESTErr, errRes.Error) - } - return ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgFabconnectRESTErr) -} - -func (f *Fabric) invokeContractMethod(ctx context.Context, channel, chaincode, methodName, signingKey, requestID string, prefixItems []*PrefixItem, input map[string]interface{}, options map[string]interface{}) error { +func (f *Fabric) invokeContractMethod(ctx context.Context, channel, chaincode, methodName, signingKey, requestID string, prefixItems []*PrefixItem, input map[string]interface{}, options map[string]interface{}) (submissionRejected bool, err error) { body, err := f.buildFabconnectRequestBody(ctx, channel, chaincode, methodName, signingKey, requestID, prefixItems, input, options) if err != nil { - return err + return true, err } - var resErr fabError + var resErr common.BlockchainRESTError res, err := f.client.R(). SetContext(ctx). SetHeader("x-firefly-sync", "false"). @@ -582,9 +611,9 @@ func (f *Fabric) invokeContractMethod(ctx context.Context, channel, chaincode, m SetError(&resErr). 
Post("/transactions") if err != nil || !res.IsSuccess() { - return wrapError(ctx, &resErr, res, err) + return resErr.SubmissionRejected, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgFabconnectRESTErr) } - return nil + return false, nil } func (f *Fabric) queryContractMethod(ctx context.Context, channel, chaincode, methodName, signingKey, requestID string, prefixItems []*PrefixItem, input map[string]interface{}, options map[string]interface{}) (*resty.Response, error) { @@ -592,14 +621,14 @@ func (f *Fabric) queryContractMethod(ctx context.Context, channel, chaincode, me if err != nil { return nil, err } - var resErr fabError + var resErr common.BlockchainRESTError res, err := f.client.R(). SetContext(ctx). SetBody(body). SetError(&resErr). Post("/query") if err != nil || !res.IsSuccess() { - return res, wrapError(ctx, &resErr, res, err) + return res, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgFabconnectRESTErr) } return res, nil } @@ -667,7 +696,8 @@ func (f *Fabric) SubmitBatchPin(ctx context.Context, nsOpID, networkNamespace, s prefixItems, pinInput := f.buildBatchPinInput(ctx, version, networkNamespace, batch) input, _ := jsonEncodeInput(pinInput) - return f.invokeContractMethod(ctx, fabricOnChainLocation.Channel, fabricOnChainLocation.Chaincode, batchPinMethodName, signingKey, nsOpID, prefixItems, input, nil) + _, err = f.invokeContractMethod(ctx, fabricOnChainLocation.Channel, fabricOnChainLocation.Chaincode, batchPinMethodName, signingKey, nsOpID, prefixItems, input, nil) + return err } func (f *Fabric) SubmitNetworkAction(ctx context.Context, nsOpID string, signingKey string, action core.NetworkActionType, location *fftypes.JSONAny) error { @@ -705,7 +735,8 @@ func (f *Fabric) SubmitNetworkAction(ctx context.Context, nsOpID string, signing } input, _ := jsonEncodeInput(pinInput) - return f.invokeContractMethod(ctx, fabricOnChainLocation.Channel, fabricOnChainLocation.Chaincode, methodName, signingKey, nsOpID, prefixItems, input, nil) + _, err = f.invokeContractMethod(ctx, fabricOnChainLocation.Channel, fabricOnChainLocation.Chaincode, methodName, signingKey, nsOpID, prefixItems, input, nil) + return err } func (f *Fabric) buildFabconnectRequestBody(ctx context.Context, channel, chaincode, methodName, signingKey, requestID string, prefixItems []*PrefixItem, input map[string]interface{}, options map[string]interface{}) (map[string]interface{}, error) { @@ -739,19 +770,26 @@ func (f *Fabric) buildFabconnectRequestBody(ctx context.Context, channel, chainc return body, nil } -func (f *Fabric) DeployContract(ctx context.Context, nsOpID, signingKey string, definition, contract *fftypes.JSONAny, input []interface{}, options map[string]interface{}) error { - return i18n.NewError(ctx, coremsgs.MsgNotSupportedByBlockchainPlugin) +func (f *Fabric) DeployContract(ctx context.Context, nsOpID, signingKey string, definition, contract *fftypes.JSONAny, input []interface{}, options map[string]interface{}) (submissionRejected bool, err error) { + return true, i18n.NewError(ctx, coremsgs.MsgNotSupportedByBlockchainPlugin) } -func (f *Fabric) ValidateInvokeRequest(ctx context.Context, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, hasMessage bool) error { +func (f *Fabric) ValidateInvokeRequest(ctx context.Context, parsedMethod interface{}, input map[string]interface{}, hasMessage bool) error { // No additional validation beyond what is enforced by Contract Manager - return nil + _, _, err := f.recoverFFI(ctx, parsedMethod) + return err } 
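Note on the protocolID change above: parseBlockchainEvent now derives the protocol ID purely from the block number and the transaction hash, because FabConnect does not surface a transaction or event index. The following is a minimal standalone sketch of that format, not the plugin code itself; the helper name is illustrative and the sample hashes are taken from the test data later in this diff.

package main

import (
	"fmt"
	"sort"
)

// buildProtocolID mirrors the format used above: a 12-digit zero-padded block
// number, then the transaction hash. The zero-padding keeps IDs from different
// blocks in lexical order, and Fabric allows at most one chaincode event per
// transaction, so the hash is unique within the block.
func buildProtocolID(blockNumber int64, txHash string) string {
	return fmt.Sprintf("%.12d/%s", blockNumber, txHash)
}

func main() {
	ids := []string{
		buildProtocolID(91, "ce79343000e851a0c742f63a733ce19a5f8b9ce1c719b6cecd14f01bcf81fff2"),
		buildProtocolID(10, "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d"),
	}
	sort.Strings(ids)
	// Prints the block-10 ID first: 000000000010/4763a0...
	fmt.Println(ids[0])
	fmt.Println(ids[1])
}

The trade-off called out in the comment is that, without the indices, events from the same block are no longer strictly ordered relative to each other by protocol ID; only cross-block ordering is preserved.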
-func (f *Fabric) InvokeContract(ctx context.Context, nsOpID string, signingKey string, location *fftypes.JSONAny, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, options map[string]interface{}, batch *blockchain.BatchPin) error { +func (f *Fabric) InvokeContract(ctx context.Context, nsOpID string, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}, batch *blockchain.BatchPin) (bool, error) { + + method, _, err := f.recoverFFI(ctx, parsedMethod) + if err != nil { + return true, err + } + fabricOnChainLocation, err := parseContractLocation(ctx, location) if err != nil { - return err + return true, err } // Build the payload schema for the method parameters @@ -759,7 +797,7 @@ func (f *Fabric) InvokeContract(ctx context.Context, nsOpID string, signingKey s for i, param := range method.Params { var paramSchema ffiParamSchema if err := json.Unmarshal(param.Schema.Bytes(), ¶mSchema); err != nil { - return i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, fmt.Sprintf("%s.schema", param.Name)) + return true, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, fmt.Sprintf("%s.schema", param.Name)) } prefixItems[i] = &PrefixItem{ @@ -781,7 +819,34 @@ func (f *Fabric) InvokeContract(ctx context.Context, nsOpID string, signingKey s return f.invokeContractMethod(ctx, fabricOnChainLocation.Channel, fabricOnChainLocation.Chaincode, method.Name, signingKey, nsOpID, prefixItems, input, options) } -func (f *Fabric) QueryContract(ctx context.Context, signingKey string, location *fftypes.JSONAny, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, options map[string]interface{}) (interface{}, error) { +type ffiMethodAndErrors struct { + method *fftypes.FFIMethod + errors []*fftypes.FFIError +} + +func (f *Fabric) ParseInterface(ctx context.Context, method *fftypes.FFIMethod, errors []*fftypes.FFIError) (interface{}, error) { + // Not very sophisticated here - we don't need to parse the FFIMethod/FFIError for Fabric, + // as there is no underlying schema to map them to. So we just use them directly. + return &ffiMethodAndErrors{ + method: method, + errors: errors, + }, nil +} + +func (f *Fabric) recoverFFI(ctx context.Context, parsedMethod interface{}) (*fftypes.FFIMethod, []*fftypes.FFIError, error) { + methodInfo, ok := parsedMethod.(*ffiMethodAndErrors) + if !ok || methodInfo.method == nil { + return nil, nil, i18n.NewError(ctx, coremsgs.MsgUnexpectedInterfaceType, parsedMethod) + } + return methodInfo.method, methodInfo.errors, nil +} + +func (f *Fabric) QueryContract(ctx context.Context, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}) (interface{}, error) { + method, _, err := f.recoverFFI(ctx, parsedMethod) + if err != nil { + return nil, err + } + fabricOnChainLocation, err := parseContractLocation(ctx, location) if err != nil { return nil, err @@ -977,7 +1042,7 @@ func (f *Fabric) GetTransactionStatus(ctx context.Context, operation *core.Opera transactionRequestPath := fmt.Sprintf("/transactions/%s", txHash) client := f.client - var resErr fabError + var resErr common.BlockchainRESTError var statusResponse fftypes.JSONObject res, err := client.R(). SetContext(ctx). 
@@ -990,7 +1055,7 @@ func (f *Fabric) GetTransactionStatus(ctx context.Context, operation *core.Opera if res.StatusCode() == 404 { return nil, nil } - return nil, wrapError(ctx, &resErr, res, err) + return nil, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgFabconnectRESTErr) } // TODO - could implement the same enhancement ethconnect has, and build a mock WS receipt if an API query diff --git a/internal/blockchain/fabric/fabric_test.go b/internal/blockchain/fabric/fabric_test.go index 16d969fb1d..b661a29959 100644 --- a/internal/blockchain/fabric/fabric_test.go +++ b/internal/blockchain/fabric/fabric_test.go @@ -33,6 +33,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftls" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/log" + "github.com/hyperledger/firefly-common/pkg/retry" "github.com/hyperledger/firefly-common/pkg/wsclient" "github.com/hyperledger/firefly/internal/blockchain/common" "github.com/hyperledger/firefly/internal/cache" @@ -62,7 +63,7 @@ func resetConf(e *Fabric) { func newTestFabric() (*Fabric, func()) { ctx, cancel := context.WithCancel(context.Background()) wsm := &wsmocks.WSClient{} - e := &Fabric{ + f := &Fabric{ ctx: ctx, cancelCtx: cancel, client: resty.New().SetBaseURL("http://localhost:12345"), @@ -75,17 +76,17 @@ func newTestFabric() (*Fabric, func()) { callbacks: common.NewBlockchainCallbacks(), subs: common.NewFireflySubscriptions(), } - return e, func() { + return f, func() { cancel() - if e.closed != nil { + if f.closed != nil { // We've init'd, wait to close - <-e.closed + <-f.closed } } } func newTestStreamManager(client *resty.Client, signer string) *streamManager { - return newStreamManager(client, signer, cache.NewUmanagedCache(context.Background(), 100, 5*time.Minute)) + return newStreamManager(client, signer, cache.NewUmanagedCache(context.Background(), 100, 5*time.Minute), defaultBatchSize, defaultBatchTimeout) } func testFFIMethod() *fftypes.FFIMethod { @@ -150,6 +151,33 @@ func TestInitMissingURL(t *testing.T) { assert.Regexp(t, "FF10138.*url", err) } +func TestInitBackgroundStart(t *testing.T) { + f, cancel := newTestFabric() + defer cancel() + resetConf(f) + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", func(r *http.Request) (*http.Response, error) { + assert.Fail(t, "Should not call event streams on init") + return &http.Response{}, nil + }) + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + utFabconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utFabconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utFabconnectConf.Set(FabconnectBackgroundStart, true) + utFabconnectConf.Set(FabconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(f.ctx, 100, 5*time.Minute), nil) + err := f.Init(f.ctx, f.cancelCtx, utConfig, &metricsmocks.Manager{}, cmi) + + assert.NoError(t, err) + assert.Empty(t, f.streamID) +} + func TestGenerateErrorSignatureNoOp(t *testing.T) { e, cancel := newTestFabric() defer cancel() @@ -266,6 +294,206 @@ func TestInitAllNewStreamsAndWSEvent(t *testing.T) { } +func TestBackgroundStart(t *testing.T) { + + log.SetLevel("trace") + e, cancel := newTestFabric() + defer cancel() + + toServer, fromServer, wsURL, done := wsclient.NewTestWSServer(nil) + defer done() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + 
u, _ := url.Parse(wsURL) + u.Scheme = "http" + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + + resetConf(e) + utFabconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utFabconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utFabconnectConf.Set(FabconnectConfigChaincodeDeprecated, "firefly") + utFabconnectConf.Set(FabconnectConfigSigner, "signer001") + utFabconnectConf.Set(FabconnectConfigTopic, "topic1") + utFabconnectConf.Set(FabconnectBackgroundStart, true) + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(e.ctx, 100, 5*time.Minute), nil) + originalContext := e.ctx + err := e.Init(e.ctx, e.cancelCtx, utConfig, &metricsmocks.Manager{}, cmi) + cmi.AssertCalled(t, "GetCache", cache.NewCacheConfig( + originalContext, + coreconfig.CacheBlockchainLimit, + coreconfig.CacheBlockchainTTL, + "", + )) + assert.NoError(t, err) + + msb := &blockchaincommonmocks.FireflySubscriptions{} + e.subs = msb + msb.On("GetSubscription", mock.Anything).Return(&common.SubscriptionInfo{ + Version: 2, + Extra: "channel1", + }) + + assert.Equal(t, "fabric", e.Name()) + assert.Equal(t, core.VerifierTypeMSPIdentity, e.VerifierType()) + + assert.NoError(t, err) + err = e.Start() + assert.NoError(t, err) + + assert.Eventually(t, func() bool { return httpmock.GetTotalCallCount() == 2 }, time.Second, time.Microsecond) + assert.Eventually(t, func() bool { return e.streamID == "es12345" }, time.Second, time.Microsecond) + assert.NotNil(t, e.Capabilities()) + + startupMessage := <-toServer + assert.Equal(t, `{"type":"listen","topic":"topic1"}`, startupMessage) + startupMessage = <-toServer + assert.Equal(t, `{"type":"listenreplies"}`, startupMessage) + fromServer <- `{"bad":"receipt"}` // will be ignored - no ack\ + fromServer <- `[]` // empty batch, will be ignored, but acked + reply := <-toServer + assert.Equal(t, `{"topic":"topic1","type":"ack"}`, reply) + fromServer <- `[{}]` // bad batch, which will be nack'd + reply = <-toServer + assert.Regexp(t, `{\"message\":\"FF10310: .*\",\"topic\":\"topic1\",\"type\":\"error\"}`, reply) + + // Bad data will be ignored + fromServer <- `!json` + fromServer <- `{"not": "a reply"}` + fromServer <- `42` + +} + +func TestBackgroundStartFail(t *testing.T) { + + log.SetLevel("trace") + e, cancel := newTestFabric() + defer cancel() + + _, _, wsURL, done := wsclient.NewTestWSServer(nil) + defer done() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + u, _ := url.Parse(wsURL) + u.Scheme = "http" + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(500, "Failed to get eventstreams")) + + resetConf(e) + utFabconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utFabconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utFabconnectConf.Set(FabconnectConfigChaincodeDeprecated, "firefly") + utFabconnectConf.Set(FabconnectConfigSigner, "signer001") + utFabconnectConf.Set(FabconnectConfigTopic, "topic1") + utFabconnectConf.Set(FabconnectBackgroundStart, true) + utFabconnectConf.Set(wsclient.WSConfigKeyInitialConnectAttempts, 1) + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(e.ctx, 
100, 5*time.Minute), nil) + originalContext := e.ctx + err := e.Init(e.ctx, e.cancelCtx, utConfig, &metricsmocks.Manager{}, cmi) + + cmi.AssertCalled(t, "GetCache", cache.NewCacheConfig( + originalContext, + coreconfig.CacheBlockchainLimit, + coreconfig.CacheBlockchainTTL, + "", + )) + assert.NoError(t, err) + + capturedErr := make(chan error) + e.backgroundRetry = &retry.Retry{ + ErrCallback: func(err error) { + capturedErr <- err + }, + } + + err = e.Start() + assert.NoError(t, err) + + err = <-capturedErr + assert.Regexp(t, "FF10284", err) +} + +func TestBackgroundStartWSFail(t *testing.T) { + + log.SetLevel("trace") + e, cancel := newTestFabric() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + u, err := url.Parse("http://localhost:12345") + assert.NoError(t, err) + + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + + resetConf(e) + utFabconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utFabconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utFabconnectConf.Set(FabconnectConfigChaincodeDeprecated, "firefly") + utFabconnectConf.Set(FabconnectConfigSigner, "signer001") + utFabconnectConf.Set(FabconnectConfigTopic, "topic1") + utFabconnectConf.Set(FabconnectBackgroundStart, true) + utFabconnectConf.Set(wsclient.WSConfigKeyInitialConnectAttempts, 1) + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(e.ctx, 100, 5*time.Minute), nil) + originalContext := e.ctx + err = e.Init(e.ctx, e.cancelCtx, utConfig, &metricsmocks.Manager{}, cmi) + cmi.AssertCalled(t, "GetCache", cache.NewCacheConfig( + originalContext, + coreconfig.CacheBlockchainLimit, + coreconfig.CacheBlockchainTTL, + "", + )) + assert.NoError(t, err) + + msb := &blockchaincommonmocks.FireflySubscriptions{} + e.subs = msb + msb.On("GetSubscription", mock.Anything).Return(&common.SubscriptionInfo{ + Version: 2, + Extra: "channel1", + }) + + assert.Equal(t, "fabric", e.Name()) + assert.Equal(t, core.VerifierTypeMSPIdentity, e.VerifierType()) + + capturedErr := make(chan error) + e.backgroundRetry = &retry.Retry{ + ErrCallback: func(err error) { + capturedErr <- err + }, + } + + err = e.Start() + assert.NoError(t, err) + + err = <-capturedErr + assert.Regexp(t, "FF00148", err) +} + func TestWSInitFail(t *testing.T) { e, cancel := newTestFabric() @@ -1254,7 +1482,11 @@ func TestHandleMessageBatchPinOK(t *testing.T) { Value: "u0vgwu9s00-x509::CN=user2,OU=client::CN=fabric-ca-server", } - em.On("BatchPinComplete", "ns1", mock.Anything, expectedSigningKeyRef).Return(nil) + em.On("BlockchainEventBatch", mock.MatchedBy(func(events []*blockchain.EventToDispatch) bool { + return len(events) == 2 && + events[0].Type == blockchain.EventTypeBatchPinComplete && + *events[0].BatchPinComplete.SigningKey == *expectedSigningKeyRef + })).Return(nil) var events []interface{} err := json.Unmarshal(data, &events) @@ -1262,15 +1494,15 @@ func TestHandleMessageBatchPinOK(t *testing.T) { err = e.handleMessageBatch(context.Background(), events) assert.NoError(t, err) - b := em.Calls[0].Arguments[1].(*blockchain.BatchPin) - assert.Equal(t, "e19af8b3-9060-4051-812d-7597d19adfb9", b.TransactionID.String()) - assert.Equal(t, "847d3bfd-0742-49ef-b65d-3fed15f5b0a6", 
b.BatchID.String()) - assert.Equal(t, "d71eb138d74c229a388eb0e1abc03f4c7cbb21d4fc4b839fbf0ec73e4263f6be", b.BatchHash.String()) - assert.Equal(t, "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", b.BatchPayloadRef) - assert.Equal(t, expectedSigningKeyRef, em.Calls[1].Arguments[2]) - assert.Len(t, b.Contexts, 2) - assert.Equal(t, "68e4da79f805bca5b912bcda9c63d03e6e867108dabb9b944109aea541ef522a", b.Contexts[0].String()) - assert.Equal(t, "19b82093de5ce92a01e333048e877e2374354bf846dd034864ef6ffbd6438771", b.Contexts[1].String()) + b := em.Calls[0].Arguments[0].([]*blockchain.EventToDispatch)[1].BatchPinComplete + assert.Equal(t, "e19af8b3-9060-4051-812d-7597d19adfb9", b.Batch.TransactionID.String()) + assert.Equal(t, "847d3bfd-0742-49ef-b65d-3fed15f5b0a6", b.Batch.BatchID.String()) + assert.Equal(t, "d71eb138d74c229a388eb0e1abc03f4c7cbb21d4fc4b839fbf0ec73e4263f6be", b.Batch.BatchHash.String()) + assert.Equal(t, "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", b.Batch.BatchPayloadRef) + assert.Equal(t, expectedSigningKeyRef, b.SigningKey) + assert.Len(t, b.Batch.Contexts, 2) + assert.Equal(t, "68e4da79f805bca5b912bcda9c63d03e6e867108dabb9b944109aea541ef522a", b.Batch.Contexts[0].String()) + assert.Equal(t, "19b82093de5ce92a01e333048e877e2374354bf846dd034864ef6ffbd6438771", b.Batch.Contexts[1].String()) em.AssertExpectations(t) @@ -1801,8 +2033,6 @@ func TestHandleMessageContractEventOldSubscription(t *testing.T) { "chaincodeId": "basic", "blockNumber": 10, "transactionId": "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", - "transactionIndex": 20, - "eventIndex": 30, "eventName": "AssetCreated", "payload": "eyJBcHByYWlzZWRWYWx1ZSI6MTAsIkNvbG9yIjoicmVkIiwiSUQiOiIxMjM0IiwiT3duZXIiOiJtZSIsIlNpemUiOjN9", "subId": "sb-cb37cc07-e873-4f58-44ab-55add6bba320" @@ -1824,10 +2054,11 @@ func TestHandleMessageContractEventOldSubscription(t *testing.T) { e.callbacks = common.NewBlockchainCallbacks() e.SetHandler("ns1", em) - em.On("BlockchainEvent", mock.MatchedBy(func(e *blockchain.EventWithSubscription) bool { - assert.Equal(t, "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", e.BlockchainTXID) - assert.Equal(t, "000000000010/000020/000030", e.Event.ProtocolID) - return true + em.On("BlockchainEventBatch", mock.MatchedBy(func(batch []*blockchain.EventToDispatch) bool { + return len(batch) == 1 && + batch[0].Type == blockchain.EventTypeForListener && + batch[0].ForListener.ProtocolID == "000000000010/4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d" && + batch[0].ForListener.BlockchainTXID == "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d" })).Return(nil) var events []interface{} @@ -1836,8 +2067,8 @@ func TestHandleMessageContractEventOldSubscription(t *testing.T) { err = e.handleMessageBatch(context.Background(), events) assert.NoError(t, err) - ev := em.Calls[0].Arguments[0].(*blockchain.EventWithSubscription) - assert.Equal(t, "sb-cb37cc07-e873-4f58-44ab-55add6bba320", ev.Subscription) + ev := em.Calls[0].Arguments[0].([]*blockchain.EventToDispatch)[0].ForListener + assert.Equal(t, "sb-cb37cc07-e873-4f58-44ab-55add6bba320", ev.ListenerID) assert.Equal(t, "AssetCreated", ev.Event.Name) outputs := fftypes.JSONObject{ @@ -1850,13 +2081,11 @@ func TestHandleMessageContractEventOldSubscription(t *testing.T) { assert.Equal(t, outputs, ev.Event.Output) info := fftypes.JSONObject{ - "blockNumber": float64(10), - "chaincodeId": "basic", - "eventName": "AssetCreated", - "subId": "sb-cb37cc07-e873-4f58-44ab-55add6bba320", - "transactionId": 
"4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", - "transactionIndex": float64(20), - "eventIndex": float64(30), + "blockNumber": float64(10), + "chaincodeId": "basic", + "eventName": "AssetCreated", + "subId": "sb-cb37cc07-e873-4f58-44ab-55add6bba320", + "transactionId": "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", } assert.Equal(t, info, ev.Event.Info) @@ -1870,8 +2099,6 @@ func TestHandleMessageContractEventNamespacedHandlers(t *testing.T) { "chaincodeId": "basic", "blockNumber": 10, "transactionId": "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", - "transactionIndex": 20, - "eventIndex": 30, "eventName": "AssetCreated", "payload": "eyJBcHByYWlzZWRWYWx1ZSI6MTAsIkNvbG9yIjoicmVkIiwiSUQiOiIxMjM0IiwiT3duZXIiOiJtZSIsIlNpemUiOjN9", "subId": "sb-cb37cc07-e873-4f58-44ab-55add6bba320" @@ -1880,8 +2107,6 @@ func TestHandleMessageContractEventNamespacedHandlers(t *testing.T) { "chaincodeId": "basic", "blockNumber": 10, "transactionId": "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746f", - "transactionIndex": 20, - "eventIndex": 30, "eventName": "AssetCreated", "payload": "eyJBcHByYWlzZWRWYWx1ZSI6MTAsIkNvbG9yIjoicmVkIiwiSUQiOiIxMjM0IiwiT3duZXIiOiJtZSIsIlNpemUiOjN9", "subId": "sb-cb37cc07-e873-4f58-44ab-55add6bba320" @@ -1903,9 +2128,11 @@ func TestHandleMessageContractEventNamespacedHandlers(t *testing.T) { e.callbacks = common.NewBlockchainCallbacks() e.SetHandler("ns1", em) - em.On("BlockchainEvent", mock.MatchedBy(func(e *blockchain.EventWithSubscription) bool { - assert.Equal(t, "000000000010/000020/000030", e.Event.ProtocolID) - return true + em.On("BlockchainEventBatch", mock.MatchedBy(func(batch []*blockchain.EventToDispatch) bool { + return len(batch) == 2 && + batch[0].Type == blockchain.EventTypeForListener && + batch[0].ForListener.ProtocolID == "000000000010/4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d" && + batch[0].ForListener.BlockchainTXID == "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d" })).Return(nil) var events []interface{} @@ -1914,8 +2141,8 @@ func TestHandleMessageContractEventNamespacedHandlers(t *testing.T) { err = e.handleMessageBatch(context.Background(), events) assert.NoError(t, err) - ev := em.Calls[0].Arguments[0].(*blockchain.EventWithSubscription) - assert.Equal(t, "sb-cb37cc07-e873-4f58-44ab-55add6bba320", ev.Subscription) + ev := em.Calls[0].Arguments[0].([]*blockchain.EventToDispatch)[0].ForListener + assert.Equal(t, "sb-cb37cc07-e873-4f58-44ab-55add6bba320", ev.ListenerID) assert.Equal(t, "AssetCreated", ev.Event.Name) outputs := fftypes.JSONObject{ @@ -1928,13 +2155,11 @@ func TestHandleMessageContractEventNamespacedHandlers(t *testing.T) { assert.Equal(t, outputs, ev.Event.Output) info := fftypes.JSONObject{ - "blockNumber": float64(10), - "chaincodeId": "basic", - "eventName": "AssetCreated", - "subId": "sb-cb37cc07-e873-4f58-44ab-55add6bba320", - "transactionId": "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", - "transactionIndex": float64(20), - "eventIndex": float64(30), + "blockNumber": float64(10), + "chaincodeId": "basic", + "eventName": "AssetCreated", + "subId": "sb-cb37cc07-e873-4f58-44ab-55add6bba320", + "transactionId": "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", } assert.Equal(t, info, ev.Event.Info) @@ -1948,8 +2173,6 @@ func TestHandleMessageContractEventNoNamespacedHandlers(t *testing.T) { "chaincodeId": "basic", "blockNumber": 10, "transactionId": 
"4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", - "transactionIndex": 20, - "eventIndex": 30, "eventName": "AssetCreated", "payload": "eyJBcHByYWlzZWRWYWx1ZSI6MTAsIkNvbG9yIjoicmVkIiwiSUQiOiIxMjM0IiwiT3duZXIiOiJtZSIsIlNpemUiOjN9", "subId": "sb-cb37cc07-e873-4f58-44ab-55add6bba320" @@ -1964,16 +2187,16 @@ func TestHandleMessageContractEventNoNamespacedHandlers(t *testing.T) { httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions/sb-cb37cc07-e873-4f58-44ab-55add6bba320", httpmock.NewJsonResponderOrPanic(200, subscription{ - ID: "sb-cb37cc07-e873-4f58-44ab-55add6bba320", Stream: "es12345", Name: "ff-sub-ns1-11232312312", + ID: "sb-cb37cc07-e873-4f58-44ab-55add6bba320", Stream: "es12345", Name: "ff-sub-ns1-58113723-0cc3-411f-aa1b-948eca83b9cd", })) e.streams = newTestStreamManager(e.client, e.signer) e.callbacks = common.NewBlockchainCallbacks() e.SetHandler("ns2", em) - em.On("BlockchainEvent", mock.MatchedBy(func(e *blockchain.EventWithSubscription) bool { + em.On("BlockchainEvent", mock.MatchedBy(func(e *blockchain.EventForListener) bool { assert.Equal(t, "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", e.BlockchainTXID) - assert.Equal(t, "000000000010/000020/000030", e.Event.ProtocolID) + assert.Equal(t, "000000000010/4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", e.Event.ProtocolID) return true })).Return(nil) @@ -1992,8 +2215,6 @@ func TestHandleMessageContractEventNoPayload(t *testing.T) { "chaincodeId": "basic", "blockNumber": 10, "transactionId": "4763a0c50e3bba7cef1a7ba35dd3f9f3426bb04d0156f326e84ec99387c4746d", - "transactionIndex": 20, - "eventIndex": 30, "eventName": "AssetCreated", "subId": "sb-cb37cc07-e873-4f58-44ab-55add6bba320" } @@ -2054,7 +2275,7 @@ func TestHandleMessageContractOldSubError(t *testing.T) { 1, "sb-b5b97a4e-a317-4053-6400-1474650efcb5", "firefly", ) - em.On("BlockchainEvent", mock.Anything).Return(fmt.Errorf("pop")) + em.On("BlockchainEventBatch", mock.Anything).Return(fmt.Errorf("pop")) var events []interface{} err := json.Unmarshal(data, &events) @@ -2093,7 +2314,7 @@ func TestHandleMessageContractEventError(t *testing.T) { e.callbacks = common.NewBlockchainCallbacks() e.SetHandler("ns1", em) - em.On("BlockchainEvent", mock.Anything).Return(fmt.Errorf("pop")) + em.On("BlockchainEventBatch", mock.Anything).Return(fmt.Errorf("pop")) var events []interface{} err := json.Unmarshal(data, &events) @@ -2123,7 +2344,7 @@ func TestHandleMessageContractGetSubError(t *testing.T) { defer httpmock.DeactivateAndReset() httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions/sb-cb37cc07-e873-4f58-44ab-55add6bba320", - httpmock.NewJsonResponderOrPanic(500, fabError{Error: "pop"})) + httpmock.NewJsonResponderOrPanic(500, common.BlockchainRESTError{Error: "pop"})) em := &blockchainmocks.Callbacks{} e.streams = newTestStreamManager(e.client, e.signer) @@ -2179,7 +2400,9 @@ func TestInvokeContractOK(t *testing.T) { return httpmock.NewJsonResponderOrPanic(200, "")(req) }) var errors []*fftypes.FFIError - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.NoError(t, err) } @@ -2217,7 +2440,9 @@ func TestInvokeContractWithBatchOK(t *testing.T) { return 
httpmock.NewJsonResponderOrPanic(200, "")(req) }) - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, nil, nil, nil, batch) + parsedMethod, err := e.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, nil, nil, batch) assert.NoError(t, err) } @@ -2249,7 +2474,7 @@ func TestDeployContractOK(t *testing.T) { assert.Equal(t, body["customOption"].(string), "customValue") return httpmock.NewJsonResponderOrPanic(400, "pop")(req) }) - err = e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) + _, err = e.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) assert.Regexp(t, "FF10429", err) } @@ -2282,7 +2507,9 @@ func TestInvokeContractBadSchema(t *testing.T) { locationBytes, err := json.Marshal(location) assert.NoError(t, err) var errors []*fftypes.FFIError - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.Regexp(t, "FF00127", err) } @@ -2305,7 +2532,9 @@ func TestInvokeContractInvalidOption(t *testing.T) { locationBytes, err := json.Marshal(location) assert.NoError(t, err) var errors []*fftypes.FFIError - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.Regexp(t, "FF10398", err) } @@ -2323,7 +2552,9 @@ func TestInvokeContractChaincodeNotSet(t *testing.T) { locationBytes, err := json.Marshal(location) assert.NoError(t, err) var errors []*fftypes.FFIError - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.Regexp(t, "FF10310", err) } @@ -2350,7 +2581,9 @@ func TestInvokeContractFabconnectError(t *testing.T) { return httpmock.NewJsonResponderOrPanic(400, "")(req) }) var errors []*fftypes.FFIError - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.Regexp(t, "FF10284", err) } @@ -2387,7 +2620,9 @@ func TestQueryContractOK(t *testing.T) { return httpmock.NewJsonResponderOrPanic(200, &fabQueryNamedOutput{})(req) }) var errors []*fftypes.FFIError - _, err = 
e.QueryContract(context.Background(), signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.QueryContract(context.Background(), signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) assert.NoError(t, err) } @@ -2410,7 +2645,9 @@ func TestQueryContractInputNotJSON(t *testing.T) { locationBytes, err := json.Marshal(location) assert.NoError(t, err) var errors []*fftypes.FFIError - _, err = e.QueryContract(context.Background(), "", fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.QueryContract(context.Background(), "", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) assert.Regexp(t, "FF00127", err) } @@ -2428,7 +2665,9 @@ func TestQueryContractBadLocation(t *testing.T) { } options := map[string]interface{}{} var errors []*fftypes.FFIError - _, err := e.QueryContract(context.Background(), "", fftypes.JSONAnyPtr(`{"validLocation": false}`), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.QueryContract(context.Background(), "", fftypes.JSONAnyPtr(`{"validLocation": false}`), parsedMethod, params, options) assert.Regexp(t, "FF10310", err) } @@ -2454,7 +2693,9 @@ func TestQueryContractFabconnectError(t *testing.T) { return httpmock.NewJsonResponderOrPanic(400, &fabQueryNamedOutput{})(req) }) var errors []*fftypes.FFIError - _, err = e.QueryContract(context.Background(), "", fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.QueryContract(context.Background(), "", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) assert.Regexp(t, "FF10284", err) } @@ -2486,7 +2727,9 @@ func TestQueryContractUnmarshalResponseError(t *testing.T) { return httpmock.NewStringResponder(200, "[definitely not JSON}")(req) }) var errors []*fftypes.FFIError - _, err = e.QueryContract(context.Background(), "", fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.QueryContract(context.Background(), "", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) assert.Regexp(t, "invalid character", err) } @@ -2550,7 +2793,9 @@ func TestInvokeJSONEncodeParamsError(t *testing.T) { return httpmock.NewJsonResponderOrPanic(400, "")(req) }) var errors []*fftypes.FFIError - err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), method, params, errors, options, nil) + parsedMethod, err := e.ParseInterface(context.Background(), method, errors) + assert.NoError(t, err) + _, err = e.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) assert.Regexp(t, "FF00127", err) } @@ -2578,6 +2823,15 @@ func TestGenerateEventSignature(t *testing.T) { assert.Equal(t, "Changed", signature) } +func matchNetworkAction(action string, expectedSigningKey core.VerifierRef) interface{} { + return mock.MatchedBy(func(batch []*blockchain.EventToDispatch) bool { + return len(batch) == 1 && + batch[0].Type == 
blockchain.EventTypeNetworkAction && + batch[0].NetworkAction.Action == action && + *batch[0].NetworkAction.SigningKey == expectedSigningKey + }) +} + func TestHandleNetworkAction(t *testing.T) { data := []byte(` [ @@ -2585,8 +2839,6 @@ func TestHandleNetworkAction(t *testing.T) { "chaincodeId": "firefly", "blockNumber": 91, "transactionId": "ce79343000e851a0c742f63a733ce19a5f8b9ce1c719b6cecd14f01bcf81fff2", - "transactionIndex": 2, - "eventIndex": 50, "eventName": "BatchPin", "payload": "eyJzaWduZXIiOiJ1MHZnd3U5czAwLXg1MDk6OkNOPXVzZXIyLE9VPWNsaWVudDo6Q049ZmFicmljLWNhLXNlcnZlciIsInRpbWVzdGFtcCI6eyJzZWNvbmRzIjoxNjMwMDMxNjY3LCJuYW5vcyI6NzkxNDk5MDAwfSwibmFtZXNwYWNlIjoiZmlyZWZseTp0ZXJtaW5hdGUiLCJ1dWlkcyI6IjB4MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMCIsImJhdGNoSGFzaCI6IiIsInBheWxvYWRSZWYiOiIiLCJjb250ZXh0cyI6W119", "subId": "sb-0910f6a8-7bd6-4ced-453e-2db68149ce8e" @@ -2610,7 +2862,7 @@ func TestHandleNetworkAction(t *testing.T) { Value: "u0vgwu9s00-x509::CN=user2,OU=client::CN=fabric-ca-server", } - em.On("BlockchainNetworkAction", "terminate", mock.AnythingOfType("*fftypes.JSONAny"), mock.AnythingOfType("*blockchain.Event"), expectedSigningKeyRef).Return(nil) + em.On("BlockchainEventBatch", matchNetworkAction("terminate", *expectedSigningKeyRef)).Return(nil) var events []interface{} err := json.Unmarshal(data, &events) @@ -2629,8 +2881,6 @@ func TestHandleNetworkActionFail(t *testing.T) { "chaincodeId": "firefly", "blockNumber": 91, "transactionId": "ce79343000e851a0c742f63a733ce19a5f8b9ce1c719b6cecd14f01bcf81fff2", - "transactionIndex": 2, - "eventIndex": 50, "eventName": "BatchPin", "payload": "eyJzaWduZXIiOiJ1MHZnd3U5czAwLXg1MDk6OkNOPXVzZXIyLE9VPWNsaWVudDo6Q049ZmFicmljLWNhLXNlcnZlciIsInRpbWVzdGFtcCI6eyJzZWNvbmRzIjoxNjMwMDMxNjY3LCJuYW5vcyI6NzkxNDk5MDAwfSwibmFtZXNwYWNlIjoiZmlyZWZseTp0ZXJtaW5hdGUiLCJ1dWlkcyI6IjB4MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMCIsImJhdGNoSGFzaCI6IiIsInBheWxvYWRSZWYiOiIiLCJjb250ZXh0cyI6W119", "subId": "sb-0910f6a8-7bd6-4ced-453e-2db68149ce8e" @@ -2654,7 +2904,7 @@ func TestHandleNetworkActionFail(t *testing.T) { Value: "u0vgwu9s00-x509::CN=user2,OU=client::CN=fabric-ca-server", } - em.On("BlockchainNetworkAction", "terminate", mock.AnythingOfType("*fftypes.JSONAny"), mock.AnythingOfType("*blockchain.Event"), expectedSigningKeyRef).Return(fmt.Errorf("pop")) + em.On("BlockchainEventBatch", matchNetworkAction("terminate", *expectedSigningKeyRef)).Return(fmt.Errorf("pop")) var events []interface{} err := json.Unmarshal(data, &events) @@ -2722,7 +2972,7 @@ func TestGetNetworkVersionFunctionNotFound(t *testing.T) { }.String()) httpmock.RegisterResponder("POST", "http://localhost:12345/query", - httpmock.NewJsonResponderOrPanic(500, fabError{Error: "Function NetworkVersion not found"})) + httpmock.NewJsonResponderOrPanic(500, common.BlockchainRESTError{Error: "Function NetworkVersion not found"})) version, err := e.GetNetworkVersion(context.Background(), location) @@ -2775,7 +3025,7 @@ func TestGetNetworkVersionFunctionNotFoundQueryFail(t *testing.T) { }.String()) httpmock.RegisterResponder("POST", "http://localhost:12345/query", - httpmock.NewJsonResponderOrPanic(500, fabError{Error: "pop"})) + httpmock.NewJsonResponderOrPanic(500, common.BlockchainRESTError{Error: "pop"})) version, err := e.GetNetworkVersion(context.Background(), location) @@ -3066,6 +3316,24 @@ func TestValidateInvokeRequest(t *testing.T) { e, cancel := newTestFabric() defer cancel() - err := 
e.ValidateInvokeRequest(context.Background(), nil, nil, nil, false) + err := e.ValidateInvokeRequest(context.Background(), &ffiMethodAndErrors{ + method: &fftypes.FFIMethod{}, + }, nil, false) assert.NoError(t, err) } + +func TestInvokeContractBadFFI(t *testing.T) { + e, cancel := newTestFabric() + defer cancel() + + _, err := e.InvokeContract(context.Background(), "", "", nil, nil, nil, nil, nil) + assert.Regexp(t, "FF10457", err) +} + +func TestQueryContractBadFFI(t *testing.T) { + e, cancel := newTestFabric() + defer cancel() + + _, err := e.QueryContract(context.Background(), "", nil, nil, nil, nil) + assert.Regexp(t, "FF10457", err) +} diff --git a/internal/blockchain/tezos/address_resolver.go b/internal/blockchain/tezos/address_resolver.go new file mode 100644 index 0000000000..2b8770a3ed --- /dev/null +++ b/internal/blockchain/tezos/address_resolver.go @@ -0,0 +1,148 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tezos + +import ( + "context" + "html/template" + "strings" + + "github.com/go-resty/resty/v2" + "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/ffresty" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly-common/pkg/i18n" + "github.com/hyperledger/firefly/internal/cache" + "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/pkg/blockchain" +) + +// addressResolver is a REST-pluggable interface to allow arbitrary strings that reference +// keys, to be resolved down to a Tezos address - which will be kept in an LRU cache. +// This supports cases where the signing device behind Tezos is able to support keys +// addressed using something like an HD Wallet hierarchical syntax. +// Once the resolver has returned the String->Address mapping, the tezosconnect downstream +// signing process must be able to process using the resolved Tezos address (meaning +// it might have to reliably store the reverse mapping, in the case of an HD wallet). 
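The comment above describes the resolver's contract: a key string is expanded through a Go template to build the HTTP request, and the resolved address is read back from a configurable response field. As a rough standalone illustration of the URL-template step only - the endpoint URL and the local inserts struct are made up for this sketch, while the {{ urlquery .Key }} usage matches the test added later in this diff:

package main

import (
	"fmt"
	"html/template"
	"strings"
)

// inserts mirrors the shape of the template inputs used by the resolver:
// the key string being resolved plus the intent of the resolution.
type inserts struct {
	Key    string
	Intent string
}

func main() {
	// Hypothetical resolver endpoint; Option("missingkey=error") matches the
	// strictness the plugin applies when compiling its templates.
	tmpl := template.Must(template.New("urlTemplate").
		Option("missingkey=error").
		Parse("http://resolver.example/resolve/{{ urlquery .Key }}"))

	url := &strings.Builder{}
	if err := tmpl.Execute(url, &inserts{Key: "uri://testkeystring", Intent: "sign"}); err != nil {
		panic(err)
	}
	// Prints: http://resolver.example/resolve/uri%3A%2F%2Ftestkeystring
	fmt.Println(url.String())
}

The plugin then issues the configured HTTP method via ffresty, reads the configured responseField out of the JSON body, and caches the result unless alwaysResolve is set.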
+type addressResolver struct { + retainOriginal bool + method string + urlTemplate *template.Template + bodyTemplate *template.Template + responseField string + client *resty.Client + cache cache.CInterface +} + +type addressResolverInserts struct { + Key string + Intent blockchain.ResolveKeyIntent +} + +func newAddressResolver(ctx context.Context, localConfig config.Section, cacheManager cache.Manager, enableCache bool) (ar *addressResolver, err error) { + client, err := ffresty.New(ctx, localConfig) + if err != nil { + return nil, err + } + + ar = &addressResolver{ + retainOriginal: localConfig.GetBool(AddressResolverRetainOriginal), + method: localConfig.GetString(AddressResolverMethod), + responseField: localConfig.GetString(AddressResolverResponseField), + client: client, + } + if enableCache { + ar.cache, err = cacheManager.GetCache( + cache.NewCacheConfig( + ctx, + coreconfig.CacheAddressResolverLimit, + coreconfig.CacheAddressResolverTTL, + "", + ), + ) + if err != nil { + return nil, err + } + } + + urlTemplateString := localConfig.GetString(AddressResolverURLTemplate) + ar.urlTemplate, err = template.New(AddressResolverURLTemplate).Option("missingkey=error").Parse(urlTemplateString) + if err != nil { + return nil, i18n.NewError(ctx, coremsgs.MsgGoTemplateCompileFailed, AddressResolverURLTemplate, err) + } + + bodyTemplateString := localConfig.GetString(AddressResolverBodyTemplate) + if bodyTemplateString != "" { + ar.bodyTemplate, err = template.New(AddressResolverBodyTemplate).Option("missingkey=error").Parse(bodyTemplateString) + if err != nil { + return nil, i18n.NewError(ctx, coremsgs.MsgGoTemplateCompileFailed, AddressResolverBodyTemplate, err) + } + } + + return ar, nil +} + +func (ar *addressResolver) ResolveSigningKey(ctx context.Context, keyDescriptor string, intent blockchain.ResolveKeyIntent) (string, error) { + if ar.cache != nil { + if cached := ar.cache.GetString(keyDescriptor); cached != "" { + return cached, nil + } + } + + inserts := &addressResolverInserts{ + Key: keyDescriptor, + Intent: intent, + } + + urlStr := &strings.Builder{} + err := ar.urlTemplate.Execute(urlStr, inserts) + if err != nil { + return "", i18n.NewError(ctx, coremsgs.MsgGoTemplateExecuteFailed, AddressResolverURLTemplate, err) + } + + bodyStr := &strings.Builder{} + if ar.bodyTemplate != nil { + err := ar.bodyTemplate.Execute(bodyStr, inserts) + if err != nil { + return "", i18n.NewError(ctx, coremsgs.MsgGoTemplateExecuteFailed, AddressResolverBodyTemplate, err) + } + } + + var jsonRes fftypes.JSONObject + res, err := ar.client.NewRequest(). + SetContext(ctx). + SetBody(bodyStr.String()). + SetResult(&jsonRes). + Execute(ar.method, urlStr.String()) + if err != nil { + return "", i18n.NewError(ctx, coremsgs.MsgAddressResolveFailed, keyDescriptor, err) + } + if res.IsError() { + return "", i18n.NewError(ctx, coremsgs.MsgAddressResolveBadStatus, keyDescriptor, res.StatusCode(), jsonRes.String()) + } + + address, err := formatTezosAddress(ctx, jsonRes.GetString(ar.responseField)) + if err != nil { + return "", i18n.NewError(ctx, coremsgs.MsgAddressResolveBadResData, keyDescriptor, jsonRes.String(), err) + } + + if ar.cache != nil { + ar.cache.SetString(keyDescriptor, address) + } + return address, nil +} diff --git a/internal/blockchain/tezos/address_resolver_test.go b/internal/blockchain/tezos/address_resolver_test.go new file mode 100644 index 0000000000..b1135bb632 --- /dev/null +++ b/internal/blockchain/tezos/address_resolver_test.go @@ -0,0 +1,317 @@ +// Copyright © 2023 Kaleido, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tezos + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/fftls" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/internal/cache" + "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/mocks/cachemocks" + "github.com/hyperledger/firefly/pkg/blockchain" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "golang.org/x/net/context" +) + +func utAddresResolverConfig() config.Section { + coreconfig.Reset() + config := config.RootSection("utaddressresovler") + (&Tezos{}).InitConfig(config) + return config.SubSection(AddressResolverConfigKey) +} + +func TestCacheInitFail(t *testing.T) { + cacheInitError := errors.New("Initialization error.") + ctx := context.Background() + config := utAddresResolverConfig() + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(nil, cacheInitError) + _, err := newAddressResolver(ctx, config, cmi, true) + assert.Equal(t, cacheInitError, err) +} + +func TestClientInitFails(t *testing.T) { + ctx := context.Background() + config := utAddresResolverConfig() + tlsConfig := config.SubSection("tls") + tlsConfig.Set(fftls.HTTPConfTLSEnabled, true) + tlsConfig.Set(fftls.HTTPConfTLSCAFile, "bad-ca!") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(nil, cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + _, err := newAddressResolver(ctx, config, cmi, true) + assert.Regexp(t, "FF00153", err) +} + +func newAddressResolverTestTezos(t *testing.T, config config.Section) (context.Context, *Tezos, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + tz := &Tezos{ctx: ctx} + var err error + tz.addressResolver, err = newAddressResolver(ctx, config, cmi, true) + assert.NoError(t, err) + return ctx, tz, cancel +} + +func TestAddressResolverInTezosOKCached(t *testing.T) { + count := 0 + addr := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("Content-Type", "application/json") + assert.Equal(t, "GET", r.Method) + assert.Equal(t, "/resolve/testkeystring", r.URL.Path) + rw.WriteHeader(200) + rw.Write([]byte(fmt.Sprintf(`{"address":"%s"}`, addr))) + assert.Zero(t, count) + count++ + })) + defer server.Close() + + config := utAddresResolverConfig() + config.Set(AddressResolverURLTemplate, fmt.Sprintf("%s/resolve/{{.Key}}", server.URL)) + + ctx, tz, cancel := newAddressResolverTestTezos(t, config) + defer cancel() + + resolved, err := tz.ResolveSigningKey(ctx, "testkeystring", blockchain.ResolveKeyIntentSign) + 
assert.NoError(t, err) + assert.Equal(t, addr, resolved) + + resolved, err = tz.ResolveSigningKey(ctx, "testkeystring", blockchain.ResolveKeyIntentSign) // cached + assert.NoError(t, err) + assert.Equal(t, addr, resolved) + assert.Equal(t, 1, count) +} + +func TestAddressResolverURLEncode(t *testing.T) { + addr := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("Content-Type", "application/json") + assert.Equal(t, "GET", r.Method) + assert.Equal(t, "/resolve/uri%3A%2F%2Ftestkeystring", r.URL.String()) + rw.WriteHeader(200) + rw.Write([]byte(fmt.Sprintf(`{"address":"%s"}`, addr))) + })) + defer server.Close() + + config := utAddresResolverConfig() + config.Set(AddressResolverURLTemplate, fmt.Sprintf("%s/resolve/{{ urlquery .Key }}", server.URL)) + + ctx, tz, cancel := newAddressResolverTestTezos(t, config) + defer cancel() + + resolved, err := tz.ResolveSigningKey(ctx, "uri://testkeystring", blockchain.ResolveKeyIntentSign) + assert.NoError(t, err) + assert.Equal(t, addr, resolved) +} + +func TestAddressResolverForceNoCacheAlwaysInvoke(t *testing.T) { + count := 0 + addr1 := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + addr2 := "tz1fffffffffffffffffffffffffffffffff" + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("Content-Type", "application/json") + assert.Equal(t, "GET", r.Method) + assert.Equal(t, fmt.Sprintf("/resolve/%s", addr1), r.URL.Path) + rw.WriteHeader(200) + // arbitrarily map addr1 to addr2 + rw.Write([]byte(fmt.Sprintf(`{"address":"%s"}`, addr2))) + count++ + })) + defer server.Close() + + config := utAddresResolverConfig() + config.Set(AddressResolverURLTemplate, fmt.Sprintf("%s/resolve/{{.Key}}", server.URL)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tz := &Tezos{ + ctx: ctx, + addressResolveAlways: true, + } + var err error + tz.addressResolver, err = newAddressResolver(ctx, config, nil, false) + assert.NoError(t, err) + + resolved, err := tz.ResolveSigningKey(ctx, addr1, blockchain.ResolveKeyIntentSign) + assert.NoError(t, err) + assert.Equal(t, addr2, resolved) + + resolved, err = tz.ResolveSigningKey(ctx, addr1, blockchain.ResolveKeyIntentSign) + assert.NoError(t, err) + assert.Equal(t, addr2, resolved) + + assert.Equal(t, count, 2) +} + +func TestAddressResolverPOSTOk(t *testing.T) { + addr := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + var jo fftypes.JSONObject + json.NewDecoder(r.Body).Decode(&jo) + assert.Equal(t, "testkeystring", jo.GetString("key")) + assert.Equal(t, "lookup", jo.GetString("intent")) + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(200) + rw.Write([]byte(fmt.Sprintf(`{"Addr":"%s"}`, addr))) + })) + defer server.Close() + + config := utAddresResolverConfig() + config.Set(AddressResolverRetainOriginal, true) + config.Set(AddressResolverMethod, "POST") + config.Set(AddressResolverURLTemplate, fmt.Sprintf("%s/resolve", server.URL)) + config.Set(AddressResolverBodyTemplate, `{"key":"{{.Key}}","intent":"{{.Intent}}"}`) + config.Set(AddressResolverResponseField, "Addr") + + ctx, tz, cancel := newAddressResolverTestTezos(t, config) + defer cancel() + + resolved, err := tz.addressResolver.ResolveSigningKey(ctx, "testkeystring", blockchain.ResolveKeyIntentLookup) + assert.NoError(t, err) + + assert.Equal(t, addr, resolved) +} + +func 
TestAddressResolverPOSTBadKey(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(200) + rw.Write([]byte(`{"address":"badness","intent":"sign"}`)) + })) + defer server.Close() + + config := utAddresResolverConfig() + config.Set(AddressResolverMethod, "POST") + config.Set(AddressResolverURLTemplate, fmt.Sprintf("%s/resolve", server.URL)) + config.Set(AddressResolverBodyTemplate, `{"key":"{{.Key}}","intent":"{{.Intent}}"}`) + + ctx, tz, cancel := newAddressResolverTestTezos(t, config) + defer cancel() + + _, err := tz.addressResolver.ResolveSigningKey(ctx, "testkeystring", blockchain.ResolveKeyIntentSign) + assert.Regexp(t, "FF10341", err) +} + +func TestAddressResolverPOSTResponse(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(204) + })) + defer server.Close() + + config := utAddresResolverConfig() + config.Set(AddressResolverMethod, "POST") + config.Set(AddressResolverURLTemplate, fmt.Sprintf("%s/resolve", server.URL)) + config.Set(AddressResolverBodyTemplate, `{"key":"{{.Key}}"}`) + + ctx, tz, cancel := newAddressResolverTestTezos(t, config) + defer cancel() + + _, err := tz.addressResolver.ResolveSigningKey(ctx, "testkeystring", blockchain.ResolveKeyIntentSign) + assert.Regexp(t, "FF10341", err) +} + +func TestAddressResolverFailureResponse(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(500) + })) + defer server.Close() + + config := utAddresResolverConfig() + config.Set(AddressResolverURLTemplate, fmt.Sprintf("%s/resolve/{{.Key}}", server.URL)) + + ctx, tz, cancel := newAddressResolverTestTezos(t, config) + defer cancel() + + _, err := tz.addressResolver.ResolveSigningKey(ctx, "testkeystring", blockchain.ResolveKeyIntentSign) + assert.Regexp(t, "FF10340", err) +} + +func TestAddressResolverErrorResponse(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(500) + })) + server.Close() // close immediately + + config := utAddresResolverConfig() + config.Set(AddressResolverURLTemplate, fmt.Sprintf("%s/resolve/{{.Key}}", server.URL)) + + ctx, tz, cancel := newAddressResolverTestTezos(t, config) + defer cancel() + + _, err := tz.addressResolver.ResolveSigningKey(ctx, "testkeystring", blockchain.ResolveKeyIntentSign) + assert.Regexp(t, "FF10339", err) +} + +func TestAddressResolverBadBodyTemplate(t *testing.T) { + config := utAddresResolverConfig() + config.Set(AddressResolverURLTemplate, "http://ff.example/resolve") + config.Set(AddressResolverBodyTemplate, `{{unclosed!}`) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + _, err := newAddressResolver(ctx, config, cmi, true) + assert.Regexp(t, "FF10337.*bodyTemplate", err) +} + +func TestAddressResolverErrorURLTemplate(t *testing.T) { + config := utAddresResolverConfig() + config.Set(AddressResolverURLTemplate, "http://ff.example/resolve/{{.Wrong}}") + + ctx, tz, cancel := newAddressResolverTestTezos(t, config) + defer cancel() + + _, err := tz.addressResolver.ResolveSigningKey(ctx, "testkeystring", blockchain.ResolveKeyIntentSign) + assert.Regexp(t, "FF10338.*urlTemplate", err) +} + +func 
TestAddressResolverErrorBodyTemplate(t *testing.T) { + config := utAddresResolverConfig() + config.Set(AddressResolverURLTemplate, "http://ff.example/resolve") + config.Set(AddressResolverBodyTemplate, "{{.Wrong}}") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + ar, err := newAddressResolver(ctx, config, cmi, true) + cmi.AssertCalled(t, "GetCache", cache.NewCacheConfig( + ctx, + coreconfig.CacheAddressResolverLimit, + coreconfig.CacheAddressResolverTTL, + "", + )) + assert.NoError(t, err) + + _, err = ar.ResolveSigningKey(ctx, "testkeystring", blockchain.ResolveKeyIntentSign) + assert.Regexp(t, "FF10338.*bodyTemplate", err) +} diff --git a/internal/blockchain/tezos/config.go b/internal/blockchain/tezos/config.go new file mode 100644 index 0000000000..1fe909d7da --- /dev/null +++ b/internal/blockchain/tezos/config.go @@ -0,0 +1,102 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tezos + +import ( + "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/ffresty" + "github.com/hyperledger/firefly-common/pkg/wsclient" +) + +const ( + defaultBatchSize = 50 + defaultBatchTimeout = 500 + defaultPrefixShort = "fly" + defaultPrefixLong = "firefly" + + defaultAddressResolverMethod = "GET" + defaultAddressResolverResponseField = "address" + + defaultBackgroundInitialDelay = "5s" + defaultBackgroundRetryFactor = 2.0 + defaultBackgroundMaxDelay = "1m" +) + +const ( + // TezosconnectConfigKey is a sub-key in the config to contain all the tezosconnect specific config + TezosconnectConfigKey = "tezosconnect" + // TezosconnectConfigTopic is the websocket listen topic that the node should register on, which is important if there are multiple + // nodes using a single tezosconnect + TezosconnectConfigTopic = "topic" + // TezosconnectConfigBatchSize is the batch size to configure on event streams, when auto-defining them + TezosconnectConfigBatchSize = "batchSize" + // TezosconnectConfigBatchTimeout is the batch timeout to configure on event streams, when auto-defining them + TezosconnectConfigBatchTimeout = "batchTimeout" + // TezosconnectPrefixShort is used in the query string in requests to tezosconnect + TezosconnectPrefixShort = "prefixShort" + // TezosconnectPrefixLong is used in HTTP headers in requests to tezosconnect + TezosconnectPrefixLong = "prefixLong" + // TezosconnectBackgroundStart is used to not fail the tezos plugin on init and retry to start it in the background + TezosconnectBackgroundStart = "backgroundStart.enabled" + // TezosconnectBackgroundStartInitialDelay is delay between restarts in the case where we retry to restart in the tezos plugin + TezosconnectBackgroundStartInitialDelay = "backgroundStart.initialDelay" + // TezosconnectBackgroundStartMaxDelay is the max delay between restarts 
in the case where we retry to restart in the tezos plugin + TezosconnectBackgroundStartMaxDelay = "backgroundStart.maxDelay" + // TezosconnectBackgroundStartFactor is to set the factor by which the delay increases when retrying + TezosconnectBackgroundStartFactor = "backgroundStart.factor" + + // AddressResolverConfigKey is a sub-key in the config to contain an address resolver config. + AddressResolverConfigKey = "addressResolver" + // AddressResolverAlwaysResolve causes the address resolve to be invoked on every API call that resolves an address and disables any caching + AddressResolverAlwaysResolve = "alwaysResolve" + // AddressResolverRetainOriginal when true the original pre-resolved string is retained after the lookup, and passed down to Tezosconnect as the from address + AddressResolverRetainOriginal = "retainOriginal" + // AddressResolverMethod the HTTP method to use to call the address resolver (default GET) + AddressResolverMethod = "method" + // AddressResolverURLTemplate the URL go template string to use when calling the address resolver - a ".intent" string can be used in the go template + AddressResolverURLTemplate = "urlTemplate" + // AddressResolverBodyTemplate the body go template string to use when calling the address resolver - a ".intent" string can be used in the go template + AddressResolverBodyTemplate = "bodyTemplate" + // AddressResolverResponseField the name of a JSON field that is provided in the response, that contains the tezos address (default "address") + AddressResolverResponseField = "responseField" + + // FFTMConfigKey is a sub-key in the config that optionally contains FireFly transaction connection information + FFTMConfigKey = "fftm" +) + +func (t *Tezos) InitConfig(config config.Section) { + t.tezosconnectConf = config.SubSection(TezosconnectConfigKey) + wsclient.InitConfig(t.tezosconnectConf) + t.tezosconnectConf.AddKnownKey(TezosconnectConfigTopic) + t.tezosconnectConf.AddKnownKey(TezosconnectBackgroundStart) + t.tezosconnectConf.AddKnownKey(TezosconnectBackgroundStartInitialDelay, defaultBackgroundInitialDelay) + t.tezosconnectConf.AddKnownKey(TezosconnectBackgroundStartFactor, defaultBackgroundRetryFactor) + t.tezosconnectConf.AddKnownKey(TezosconnectBackgroundStartMaxDelay, defaultBackgroundMaxDelay) + t.tezosconnectConf.AddKnownKey(TezosconnectConfigBatchSize, defaultBatchSize) + t.tezosconnectConf.AddKnownKey(TezosconnectConfigBatchTimeout, defaultBatchTimeout) + t.tezosconnectConf.AddKnownKey(TezosconnectPrefixShort, defaultPrefixShort) + t.tezosconnectConf.AddKnownKey(TezosconnectPrefixLong, defaultPrefixLong) + + addressResolverConf := config.SubSection(AddressResolverConfigKey) + ffresty.InitConfig(addressResolverConf) + addressResolverConf.AddKnownKey(AddressResolverAlwaysResolve) + addressResolverConf.AddKnownKey(AddressResolverRetainOriginal) + addressResolverConf.AddKnownKey(AddressResolverMethod, defaultAddressResolverMethod) + addressResolverConf.AddKnownKey(AddressResolverURLTemplate) + addressResolverConf.AddKnownKey(AddressResolverBodyTemplate) + addressResolverConf.AddKnownKey(AddressResolverResponseField, defaultAddressResolverResponseField) +} diff --git a/internal/blockchain/tezos/eventstream.go b/internal/blockchain/tezos/eventstream.go new file mode 100644 index 0000000000..f48f93d094 --- /dev/null +++ b/internal/blockchain/tezos/eventstream.go @@ -0,0 +1,244 @@ +// Copyright © 2023 Kaleido, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tezos + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/go-resty/resty/v2" + "github.com/hyperledger/firefly-common/pkg/ffresty" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly-common/pkg/log" + "github.com/hyperledger/firefly/internal/cache" + "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/pkg/core" +) + +type streamManager struct { + client *resty.Client + cache cache.CInterface + batchSize uint + batchTimeout uint +} + +type eventStream struct { + ID string `json:"id"` + Name string `json:"name"` + ErrorHandling string `json:"errorHandling"` + BatchSize uint `json:"batchSize"` + BatchTimeoutMS uint `json:"batchTimeoutMS"` + Type string `json:"type"` + Timestamps bool `json:"timestamps"` +} + +type subscription struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Stream string `json:"stream"` + FromBlock string `json:"fromBlock"` + TezosCompatAddress string `json:"address,omitempty"` + Filter eventFilter `json:"filter"` + Filters []fftypes.JSONAny `json:"filters"` + subscriptionCheckpoint +} + +type eventFilter struct { + EventFilter string `json:"eventFilter"` +} + +func newStreamManager(client *resty.Client, cache cache.CInterface, batchSize, batchTimeout uint) *streamManager { + return &streamManager{ + client: client, + cache: cache, + batchSize: batchSize, + batchTimeout: batchTimeout, + } +} + +func (s *streamManager) getEventStreams(ctx context.Context) (streams []*eventStream, err error) { + res, err := s.client.R(). + SetContext(ctx). + SetResult(&streams). + Get("/eventstreams") + if err != nil || !res.IsSuccess() { + return nil, ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgTezosconnectRESTErr) + } + return streams, nil +} + +func buildEventStream(topic string, batchSize, batchTimeout uint) *eventStream { + return &eventStream{ + Name: topic, + ErrorHandling: "block", + BatchSize: batchSize, + BatchTimeoutMS: batchTimeout, + Type: "websocket", + Timestamps: true, + } +} + +func (s *streamManager) createEventStream(ctx context.Context, topic string) (*eventStream, error) { + stream := buildEventStream(topic, s.batchSize, s.batchTimeout) + res, err := s.client.R(). + SetContext(ctx). + SetBody(stream). + SetResult(stream). + Post("/eventstreams") + if err != nil || !res.IsSuccess() { + return nil, ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgTezosconnectRESTErr) + } + return stream, nil +} + +func (s *streamManager) updateEventStream(ctx context.Context, topic string, batchSize, batchTimeout uint, eventStreamID string) (*eventStream, error) { + stream := buildEventStream(topic, batchSize, batchTimeout) + res, err := s.client.R(). + SetContext(ctx). + SetBody(stream). + SetResult(stream). 
+ Patch("/eventstreams/" + eventStreamID) + if err != nil || !res.IsSuccess() { + return nil, ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgTezosconnectRESTErr) + } + return stream, nil +} + +func (s *streamManager) ensureEventStream(ctx context.Context, topic string) (*eventStream, error) { + existingStreams, err := s.getEventStreams(ctx) + if err != nil { + return nil, err + } + for _, stream := range existingStreams { + if stream.Name == topic { + stream, err = s.updateEventStream(ctx, topic, s.batchSize, s.batchTimeout, stream.ID) + if err != nil { + return nil, err + } + return stream, nil + } + } + return s.createEventStream(ctx, topic) +} + +func (s *streamManager) getSubscriptions(ctx context.Context) (subs []*subscription, err error) { + res, err := s.client.R(). + SetContext(ctx). + SetResult(&subs). + Get("/subscriptions") + if err != nil || !res.IsSuccess() { + return nil, ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgTezosconnectRESTErr) + } + return subs, nil +} + +func (s *streamManager) getSubscription(ctx context.Context, subID string, okNotFound bool) (sub *subscription, err error) { + res, err := s.client.R(). + SetContext(ctx). + SetResult(&sub). + Get(fmt.Sprintf("/subscriptions/%s", subID)) + if err != nil || !res.IsSuccess() { + if okNotFound && res.StatusCode() == 404 { + return nil, nil + } + return nil, ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgTezosconnectRESTErr) + } + return sub, nil +} + +func (s *streamManager) createSubscription(ctx context.Context, location *Location, stream, name, event, firstEvent string) (*subscription, error) { + // Map FireFly "firstEvent" values to Tezos "fromBlock" values + switch firstEvent { + case string(core.SubOptsFirstEventOldest): + firstEvent = "0" + case string(core.SubOptsFirstEventNewest): + firstEvent = "latest" + } + + filters := make([]fftypes.JSONAny, 0) + filter := eventFilter{ + EventFilter: event, + } + filterJSON, _ := json.Marshal(filter) + filters = append(filters, fftypes.JSONAny(filterJSON)) + + sub := subscription{ + Name: name, + Stream: stream, + Filters: filters, + FromBlock: firstEvent, + } + + if location != nil { + sub.TezosCompatAddress = location.Address + } + + res, err := s.client.R(). + SetContext(ctx). + SetBody(&sub). + SetResult(&sub). + Post("/subscriptions") + if err != nil || !res.IsSuccess() { + return nil, ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgTezosconnectRESTErr) + } + return &sub, nil +} + +func (s *streamManager) deleteSubscription(ctx context.Context, subID string, okNotFound bool) error { + res, err := s.client.R(). + SetContext(ctx). + Delete("/subscriptions/" + subID) + if err != nil || !res.IsSuccess() { + if okNotFound && res.StatusCode() == 404 { + return nil + } + return ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgTezosconnectRESTErr) + } + return nil +} + +func (s *streamManager) ensureFireFlySubscription(ctx context.Context, namespace string, version int, instancePath, firstEvent, stream, event string) (sub *subscription, err error) { + // Include a hash of the instance path in the subscription, so if we ever point at a different + // contract configuration, we re-subscribe from block 0. + // We don't need full strength hashing, so just use the first 16 chars for readability. 
+ instanceUniqueHash := hex.EncodeToString(sha256.New().Sum([]byte(instancePath)))[0:16] + + existingSubs, err := s.getSubscriptions(ctx) + if err != nil { + return nil, err + } + + v2Name := fmt.Sprintf("%s_%s_%s", namespace, event, instanceUniqueHash) + + for _, s := range existingSubs { + if s.Stream == stream { + if s.Name == v2Name { + return s, nil + } + } + } + + location := &Location{Address: instancePath} + if sub, err = s.createSubscription(ctx, location, stream, v2Name, event, firstEvent); err != nil { + return nil, err + } + log.L(ctx).Infof("%s subscription: %s", event, sub.ID) + return sub, nil +} diff --git a/internal/blockchain/tezos/ffi2michelson.go b/internal/blockchain/tezos/ffi2michelson.go new file mode 100644 index 0000000000..1930ddb4ef --- /dev/null +++ b/internal/blockchain/tezos/ffi2michelson.go @@ -0,0 +1,351 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tezos + +import ( + "errors" + "fmt" + + "blockwatch.cc/tzgo/micheline" + "blockwatch.cc/tzgo/tezos" +) + +// FFI schema types +const ( + _jsonArray = "array" +) + +// Tezos data types +const ( + _internalBoolean = "boolean" + _internalList = "list" + _internalStruct = "struct" + _internalInteger = "integer" + _internalNat = "nat" + _internalString = "string" + _internalVariant = "variant" + _internalOption = "option" + _internalAddress = "address" + _internalBytes = "bytes" +) + +func processArgs(payloadSchema map[string]interface{}, input map[string]interface{}, methodName string) (micheline.Parameters, error) { + params := micheline.Parameters{ + Entrypoint: methodName, + Value: micheline.NewPrim(micheline.D_UNIT), + } + + if input == nil { + return params, fmt.Errorf("must specify args") + } + if payloadSchema == nil { + return params, errors.New("no payload schema provided") + } + + rootType := payloadSchema["type"] + if rootType.(string) != _jsonArray { + return params, fmt.Errorf("payload schema must define a root type of \"array\"") + } + // we require the schema to use "prefixItems" to define the ordered array of arguments + pitems := payloadSchema["prefixItems"] + if pitems == nil { + return params, fmt.Errorf("payload schema must define a root type of \"array\" using \"prefixItems\"") + } + + items := pitems.([]interface{}) + + // If entrypoint doesn't accept parameters - send micheline.D_UNIT param (represents the absence of a meaningful value) + if len(items) == 0 { + return params, nil + } + if len(items) == 1 { + michelineVal, err := convertFFIParamToMichelsonParam(input, items[0]) + if err != nil { + return params, err + } + params.Value = michelineVal + } else { + seq := micheline.NewSeq() + for _, item := range items { + michelineVal, err := convertFFIParamToMichelsonParam(input, item) + if err != nil { + return params, err + } + seq.Args = append(seq.Args, michelineVal) + } + params.Value = seq + } + + return params, nil +} + +func convertFFIParamToMichelsonParam(argsMap 
map[string]interface{}, arg interface{}) (resp micheline.Prim, err error) { + argDef := arg.(map[string]interface{}) + propType := argDef["type"].(string) + details := argDef["details"].(map[string]interface{}) + name := argDef["name"] + if name == nil { + return resp, fmt.Errorf("property definitions of the \"prefixItems\" in the payload schema must have a \"name\"") + } + + entry := argsMap[name.(string)] + + if propType == _jsonArray { + resp = micheline.NewSeq() + for _, item := range entry.([]interface{}) { + prop, err := processMichelson(item, details) + if err != nil { + return resp, err + } + + resp.Args = append(resp.Args, prop) + } + } else { + resp, err = processMichelson(entry, details) + if err != nil { + return resp, err + } + } + + return resp, nil +} + +func processMichelson(entry interface{}, details map[string]interface{}) (resp micheline.Prim, err error) { + if details["type"] == "schema" { + internalSchema := details["internalSchema"].(map[string]interface{}) + resp, err = processSchemaEntry(entry, internalSchema) + } else { + internalType := details["internalType"].(string) + resp, err = processPrimitive(entry, internalType) + if err == nil { + propKind := details["kind"].(string) + resp = applyKind(resp, propKind) + } + } + + return resp, err +} + +func processSchemaEntry(entry interface{}, schema map[string]interface{}) (resp micheline.Prim, err error) { + entryType := schema["type"].(string) + switch entryType { + case _internalStruct: + schemaArgs := schema["args"].([]interface{}) + + var rightPairElem *micheline.Prim + for i := len(schemaArgs) - 1; i >= 0; i-- { + arg := schemaArgs[i].(map[string]interface{}) + + argName := arg["name"].(string) + elem := entry.(map[string]interface{}) + if _, ok := elem[argName]; !ok { + return resp, errors.New("Schema field '" + argName + "' wasn't found") + } + + processedEntry, err := processSchemaEntry(elem[argName], arg) + if err != nil { + return resp, err + } + newPair := forgePair(processedEntry, rightPairElem) + rightPairElem = &newPair + + resp = newPair + } + case _internalList: + schemaArgs := schema["args"].([]interface{}) + + for i := len(schemaArgs) - 1; i >= 0; i-- { + arg := schemaArgs[i].(map[string]interface{}) + + listResp := micheline.NewSeq() + for _, listElem := range entry.([]interface{}) { + processedEntry, err := processSchemaEntry(listElem, arg) + if err != nil { + return resp, err + } + listResp.Args = append(listResp.Args, processedEntry) + } + resp = listResp + } + case _internalVariant: + schemaArgs := schema["args"].([]interface{}) + arg := schemaArgs[0].(map[string]interface{}) + elem := entry.(map[string]interface{}) + + variants := schema["variants"].([]interface{}) + for i, variant := range variants { + if el, ok := elem[variant.(string)]; ok { + processedEntry, err := processSchemaEntry(el, arg) + if err != nil { + return resp, err + } + if len(variants) <= 1 || len(variants) > 4 { + return resp, errors.New("wrong number of variants") + } + resp = wrapWithVariant(processedEntry, i+1, len(variants)) + break + } + } + default: + resp, err = processPrimitive(entry, entryType) + } + + return resp, err +} + +// TODO: define an algorithm to support any number of variants. 
+// at the moment, support for up to 4 variants covers most cases +func wrapWithVariant(elem micheline.Prim, variantPos int, totalVariantsCount int) (resp micheline.Prim) { + switch totalVariantsCount { + case 2: + branch := micheline.D_LEFT + if variantPos == 2 { + branch = micheline.D_RIGHT + } + resp = micheline.NewCode( + branch, + elem, + ) + case 3: + switch variantPos { + case 1: + resp = micheline.NewCode( + micheline.D_LEFT, + elem, + ) + case 2: + resp = micheline.NewCode( + micheline.D_RIGHT, + micheline.NewCode( + micheline.D_LEFT, + elem, + ), + ) + case 3: + resp = micheline.NewCode( + micheline.D_RIGHT, + micheline.NewCode( + micheline.D_RIGHT, + elem, + ), + ) + } + case 4: + switch variantPos { + case 1: + resp = micheline.NewCode( + micheline.D_LEFT, + micheline.NewCode( + micheline.D_LEFT, + elem, + ), + ) + case 2: + resp = micheline.NewCode( + micheline.D_LEFT, + micheline.NewCode( + micheline.D_RIGHT, + elem, + ), + ) + case 3: + resp = micheline.NewCode( + micheline.D_RIGHT, + micheline.NewCode( + micheline.D_LEFT, + elem, + ), + ) + case 4: + resp = micheline.NewCode( + micheline.D_RIGHT, + micheline.NewCode( + micheline.D_RIGHT, + elem, + ), + ) + } + } + + return resp +} + +func forgePair(leftElem micheline.Prim, rightElem *micheline.Prim) micheline.Prim { + if rightElem == nil { + return leftElem + } + return micheline.NewPair(leftElem, *rightElem) +} + +func processPrimitive(entry interface{}, propType string) (resp micheline.Prim, err error) { + switch propType { + case _internalInteger, _internalNat: + entryValue, ok := entry.(float64) + if !ok { + return resp, errors.New("invalid object passed") + } + + resp = micheline.NewInt64(int64(entryValue)) + case _internalString: + arg, ok := entry.(string) + if !ok { + return resp, errors.New("invalid object passed") + } + + resp = micheline.NewString(arg) + case _internalBytes: + entryValue, ok := entry.(string) + if !ok { + return resp, errors.New("invalid object passed") + } + + resp = micheline.NewBytes([]byte(entryValue)) + case _internalBoolean: + entryValue, ok := entry.(bool) + if !ok { + return resp, errors.New("invalid object passed") + } + + opCode := micheline.D_FALSE + if entryValue { + opCode = micheline.D_TRUE + } + + resp = micheline.NewPrim(opCode) + case _internalAddress: + entryValue, ok := entry.(string) + if !ok { + return resp, errors.New("invalid object passed") + } + + address, err := tezos.ParseAddress(entryValue) + if err != nil { + return resp, err + } + + resp = micheline.NewAddress(address) + } + + return resp, nil +} + +func applyKind(param micheline.Prim, kind string) micheline.Prim { + if kind == _internalOption { + return micheline.NewOption(param) + } + return param +} diff --git a/internal/blockchain/tezos/ffi2michelson_test.go b/internal/blockchain/tezos/ffi2michelson_test.go new file mode 100644 index 0000000000..2a0568e5aa --- /dev/null +++ b/internal/blockchain/tezos/ffi2michelson_test.go @@ -0,0 +1,956 @@ +package tezos + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_processArgsOk(t *testing.T) { + methodName := "name" + + testCases := []struct { + name string + processSchemaReq map[string]interface{} + input map[string]interface{} + }{ + { + name: "no input params", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{}, + }, + input: map[string]interface{}{}, + }, + { + name: "primitive input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + 
map[string]interface{}{ + "name": "varNat", + "type": "integer", + "details": map[string]interface{}{ + "type": "integer", + "internalType": "nat", + "kind": "option", + }, + }, + }, + }, + input: map[string]interface{}{ + "varNat": float64(1), + }, + }, + { + name: "several primitive input params", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varNat", + "type": "integer", + "details": map[string]interface{}{ + "type": "integer", + "internalType": "nat", + "kind": "option", + }, + }, + map[string]interface{}{ + "name": "varString", + "type": "string", + "details": map[string]interface{}{ + "type": "string", + "internalType": "string", + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varNat": float64(1), + "varString": "str", + }, + }, + { + name: "array of primitives input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varArr", + "type": "array", + "details": map[string]interface{}{ + "type": "string", + "internalType": "string", + "internalSchema": nil, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varArr": []interface{}{"str1", "str2", "str3"}, + }, + }, + { + name: "struct input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varStruct", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "struct", + "args": []interface{}{ + map[string]interface{}{ + "name": "token_id", + "type": "nat", + }, + map[string]interface{}{ + "name": "token_name", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varStruct": map[string]interface{}{ + "token_id": float64(1), + "token_name": "token name", + }, + }, + }, + { + name: "list of structures input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varList", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "name": "batch", + "type": "list", + "args": []interface{}{ + map[string]interface{}{ + "type": "struct", + "args": []interface{}{ + map[string]interface{}{ + "name": "token_id", + "type": "nat", + }, + map[string]interface{}{ + "name": "token_name", + "type": "string", + }, + }, + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varList": []interface{}{ + map[string]interface{}{ + "token_id": float64(1), + "token_name": "token name", + }, + map[string]interface{}{ + "token_id": float64(2), + "token_name": "token name 2", + }, + }, + }, + }, + { + name: "variant(2 options - 1st) input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": 
map[string]interface{}{ + "add_string": "new str", + }, + }, + }, + { + name: "variant(2 options - 2nd) input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": map[string]interface{}{ + "remove_string": "str", + }, + }, + }, + { + name: "variant(3 options - 1st) input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + "edit_string", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": map[string]interface{}{ + "add_string": "new str", + }, + }, + }, + { + name: "variant(3 options - 2nd) input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + "edit_string", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": map[string]interface{}{ + "remove_string": "str", + }, + }, + }, + { + name: "variant(3 options - 3rd) input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + "edit_string", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": map[string]interface{}{ + "edit_string": "new str", + }, + }, + }, + { + name: "variant(4 options - 1st) input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + "edit_string", + "read_string", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": 
map[string]interface{}{ + "add_string": "new str", + }, + }, + }, + { + name: "variant(4 options - 2nd) input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + "edit_string", + "read_string", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": map[string]interface{}{ + "remove_string": "str", + }, + }, + }, + { + name: "variant(4 options - 3rd) input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + "edit_string", + "read_string", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": map[string]interface{}{ + "edit_string": "str", + }, + }, + }, + { + name: "variant(4 options - 4th) input param", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + "edit_string", + "read_string", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": map[string]interface{}{ + "read_string": "str", + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := processArgs(tc.processSchemaReq, tc.input, methodName) + assert.NoError(t, err) + }) + } +} + +func Test_processArgsErr(t *testing.T) { + methodName := "name" + + testCases := []struct { + name string + processSchemaReq map[string]interface{} + input map[string]interface{} + expectedError string + }{ + { + name: "nil schema", + processSchemaReq: nil, + input: map[string]interface{}{}, + expectedError: "no payload schema provided", + }, + { + name: "nil input", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{}, + }, + input: nil, + expectedError: "must specify args", + }, + { + name: "schema wrong root type", + processSchemaReq: map[string]interface{}{ + "type": "wrong", + "prefixItems": []interface{}{}, + }, + input: map[string]interface{}{}, + expectedError: "payload schema must define a root type of \"array\"", + }, + { + name: "schema nil prefixItems", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": nil, + }, + input: map[string]interface{}{}, + expectedError: "using \"prefixItems\"", + }, + { + name: "empty name", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + 
"type": "integer", + "details": map[string]interface{}{ + "type": "integer", + "internalType": "nat", + }, + }, + }, + }, + input: map[string]interface{}{}, + expectedError: "schema must have a \"name\"", + }, + { + name: "wrong integer/nat type", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varNat", + "type": "integer", + "details": map[string]interface{}{ + "type": "integer", + "internalType": "nat", + }, + }, + }, + }, + input: map[string]interface{}{ + "varNat": "wrong type", + }, + expectedError: "invalid object passed", + }, + { + name: "wrong string type", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varString", + "type": "string", + "details": map[string]interface{}{ + "type": "string", + "internalType": "string", + }, + }, + }, + }, + input: map[string]interface{}{ + "varString": struct{}{}, + }, + expectedError: "invalid object passed", + }, + { + name: "wrong bytes type", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varBytes", + "type": "string", + "details": map[string]interface{}{ + "type": "bytes", + "internalType": "bytes", + }, + }, + }, + }, + input: map[string]interface{}{ + "varBytes": struct{}{}, + }, + expectedError: "invalid object passed", + }, + { + name: "wrong boolean type", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varBool", + "type": "boolean", + "details": map[string]interface{}{ + "type": "boolean", + "internalType": "boolean", + }, + }, + }, + }, + input: map[string]interface{}{ + "varBool": struct{}{}, + }, + expectedError: "invalid object passed", + }, + { + name: "wrong address type", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varAddress", + "type": "boolean", + "details": map[string]interface{}{ + "type": "address", + "internalType": "address", + }, + }, + }, + }, + input: map[string]interface{}{ + "varAddress": struct{}{}, + }, + expectedError: "invalid object passed", + }, + { + name: "wrong address type 2", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varAddress", + "type": "boolean", + "details": map[string]interface{}{ + "type": "address", + "internalType": "address", + }, + }, + }, + }, + input: map[string]interface{}{ + "varAddress": "wrong address", + }, + expectedError: "unknown address type", + }, + { + name: "several primitive input params - wrong type", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varNat", + "type": "integer", + "details": map[string]interface{}{ + "type": "integer", + "internalType": "nat", + "kind": "option", + }, + }, + map[string]interface{}{ + "name": "varString", + "type": "string", + "details": map[string]interface{}{ + "type": "string", + "internalType": "string", + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varNat": float64(1), + "varString": struct{}{}, + }, + expectedError: "invalid object passed", + }, + { + name: "different array item types", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varArr", + 
"type": "array", + "details": map[string]interface{}{ + "type": "string", + "internalType": "string", + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varArr": []interface{}{"str1", 1, struct{}{}}, + }, + expectedError: "invalid object passed", + }, + { + name: "struct input param - empty schema field", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varStruct", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "struct", + "args": []interface{}{ + map[string]interface{}{ + "name": "token_id", + "type": "nat", + }, + map[string]interface{}{ + "name": "token_name", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varStruct": map[string]interface{}{ + "token_name": "token name", + }, + }, + expectedError: "wasn't found", + }, + { + name: "struct input param - wrong type", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varStruct", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "struct", + "args": []interface{}{ + map[string]interface{}{ + "name": "token_id", + "type": "nat", + }, + map[string]interface{}{ + "name": "token_name", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varStruct": map[string]interface{}{ + "token_id": "wrong type", + "token_name": "token name", + }, + }, + expectedError: "invalid object passed", + }, + { + name: "list of structures input param - wrong type", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varList", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "name": "batch", + "type": "list", + "args": []interface{}{ + map[string]interface{}{ + "type": "struct", + "args": []interface{}{ + map[string]interface{}{ + "name": "token_id", + "type": "nat", + }, + map[string]interface{}{ + "name": "token_name", + "type": "string", + }, + }, + }, + }, + }, + "kind": "", + "variants": nil, + }, + }, + }, + }, + input: map[string]interface{}{ + "varList": []interface{}{ + map[string]interface{}{ + "token_id": "wrong type", + "token_name": "token name", + }, + map[string]interface{}{ + "token_id": float64(2), + "token_name": "token name 2", + }, + }, + }, + expectedError: "invalid object passed", + }, + { + name: "invalid num of variants", + processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + "edit_string", + "read_string", + "excess_variant", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": map[string]interface{}{ + "add_string": "str", + }, + }, + expectedError: "wrong number of variants", + }, + { + name: "wrong variant type", + 
processSchemaReq: map[string]interface{}{ + "type": "array", + "prefixItems": []interface{}{ + map[string]interface{}{ + "name": "varVariant", + "type": "object", + "details": map[string]interface{}{ + "type": "schema", + "internalType": nil, + "internalSchema": map[string]interface{}{ + "type": "variant", + "variants": []interface{}{ + "add_string", + "remove_string", + "edit_string", + "read_string", + "excess_variant", + }, + "args": []interface{}{ + map[string]interface{}{ + "name": "new_string", + "type": "string", + }, + }, + }, + "kind": "", + }, + }, + }, + }, + input: map[string]interface{}{ + "varVariant": map[string]interface{}{ + "add_string": struct{}{}, + }, + }, + expectedError: "invalid object passed", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := processArgs(tc.processSchemaReq, tc.input, methodName) + assert.Error(t, err) + assert.Regexp(t, tc.expectedError, err) + }) + } +} diff --git a/internal/blockchain/tezos/tezos.go b/internal/blockchain/tezos/tezos.go new file mode 100644 index 0000000000..f5dd984d4a --- /dev/null +++ b/internal/blockchain/tezos/tezos.go @@ -0,0 +1,741 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
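+// Package tezos implements the FireFly blockchain plugin interface for Tezos: transactions and queries are
+// submitted to a tezosconnect instance over HTTP, events arrive over its websocket event streams, and signing
+// keys can optionally be resolved through a pluggable address resolver.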
+ +package tezos + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + + "blockwatch.cc/tzgo/micheline" + "github.com/go-resty/resty/v2" + "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/ffresty" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly-common/pkg/i18n" + "github.com/hyperledger/firefly-common/pkg/log" + "github.com/hyperledger/firefly-common/pkg/retry" + "github.com/hyperledger/firefly-common/pkg/wsclient" + "github.com/hyperledger/firefly/internal/blockchain/common" + "github.com/hyperledger/firefly/internal/cache" + "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/internal/metrics" + "github.com/hyperledger/firefly/pkg/blockchain" + "github.com/hyperledger/firefly/pkg/core" +) + +const ( + tezosTxStatusPending string = "Pending" +) + +const ( + ReceiptTransactionSuccess string = "TransactionSuccess" + ReceiptTransactionFailed string = "TransactionFailed" +) + +type Tezos struct { + ctx context.Context + cancelCtx context.CancelFunc + pluginTopic string + prefixShort string + prefixLong string + capabilities *blockchain.Capabilities + callbacks common.BlockchainCallbacks + client *resty.Client + streams *streamManager + streamID string + wsconn wsclient.WSClient + closed chan struct{} + addressResolveAlways bool + addressResolver *addressResolver + metrics metrics.Manager + tezosconnectConf config.Section + subs common.FireflySubscriptions + cache cache.CInterface + backgroundRetry *retry.Retry + backgroundStart bool +} + +type Location struct { + Address string `json:"address"` +} + +var batchPinEvent = "BatchPin" + +type ListenerCheckpoint struct { + Block int64 `json:"block"` + TransactionBatchIndex int64 `json:"transactionBatchIndex"` + TransactionIndex int64 `json:"transactionIndex"` + MetaInternalResultIndex int64 `json:"metaInternalResultIndex"` +} + +type ListenerStatus struct { + Checkpoint ListenerCheckpoint `json:"checkpoint"` + Catchup bool `json:"catchup"` +} + +type subscriptionCheckpoint struct { + Checkpoint ListenerCheckpoint `json:"checkpoint,omitempty"` + Catchup bool `json:"catchup,omitempty"` +} + +type TezosconnectMessageHeaders struct { + Type string `json:"type,omitempty"` + ID string `json:"id,omitempty"` +} + +type PayloadSchema struct { + Type string `json:"type"` + PrefixItems []*PrefixItem `json:"prefixItems"` +} + +type PrefixItem struct { + Name string `json:"name"` + Type string `json:"type"` + Details paramDetails `json:"details,omitempty"` +} + +type paramDetails struct { + Type string `json:"type"` + InternalType string `json:"internalType"` + InternalSchema fftypes.JSONObject `json:"internalSchema"` + Kind string `json:"kind"` + Variants []string `json:"variants"` +} +type ffiParamSchema struct { + Type string `json:"type,omitempty"` + Details paramDetails `json:"details,omitempty"` +} + +type ffiMethodAndErrors struct { + method *fftypes.FFIMethod + errors []*fftypes.FFIError +} + +type tezosWSCommandPayload struct { + Type string `json:"type"` + Topic string `json:"topic,omitempty"` +} + +var addressVerify = regexp.MustCompile("^(tz[1-4]|[Kk][Tt]1)[1-9A-Za-z]{33}$") + +func (t *Tezos) Name() string { + return "tezos" +} + +func (t *Tezos) VerifierType() core.VerifierType { + return core.VerifierTypeTezosAddress +} + +func (t *Tezos) Init(ctx context.Context, cancelCtx context.CancelFunc, conf config.Section, metrics metrics.Manager, cacheManager 
cache.Manager) (err error) { + t.InitConfig(conf) + tezosconnectConf := t.tezosconnectConf + addressResolverConf := conf.SubSection(AddressResolverConfigKey) + + t.ctx = log.WithLogField(ctx, "proto", "tezos") + t.cancelCtx = cancelCtx + t.metrics = metrics + t.capabilities = &blockchain.Capabilities{} + t.callbacks = common.NewBlockchainCallbacks() + t.subs = common.NewFireflySubscriptions() + + if addressResolverConf.GetString(AddressResolverURLTemplate) != "" { + // Check if we need to invoke the address resolver (without caching) on every call + t.addressResolveAlways = addressResolverConf.GetBool(AddressResolverAlwaysResolve) + if t.addressResolver, err = newAddressResolver(ctx, addressResolverConf, cacheManager, !t.addressResolveAlways); err != nil { + return err + } + } + + if tezosconnectConf.GetString(ffresty.HTTPConfigURL) == "" { + return i18n.NewError(ctx, coremsgs.MsgMissingPluginConfig, "url", tezosconnectConf) + } + + wsConfig, err := wsclient.GenerateConfig(ctx, tezosconnectConf) + if err == nil { + t.client, err = ffresty.New(t.ctx, tezosconnectConf) + } + + if err != nil { + return err + } + + t.pluginTopic = tezosconnectConf.GetString(TezosconnectConfigTopic) + if t.pluginTopic == "" { + return i18n.NewError(ctx, coremsgs.MsgMissingPluginConfig, "topic", "blockchain.tezos.tezosconnect") + } + t.prefixShort = tezosconnectConf.GetString(TezosconnectPrefixShort) + t.prefixLong = tezosconnectConf.GetString(TezosconnectPrefixLong) + + if wsConfig.WSKeyPath == "" { + wsConfig.WSKeyPath = "/ws" + } + t.wsconn, err = wsclient.New(ctx, wsConfig, nil, t.afterConnect) + if err != nil { + return err + } + cache, err := cacheManager.GetCache( + cache.NewCacheConfig( + ctx, + coreconfig.CacheBlockchainLimit, + coreconfig.CacheBlockchainTTL, + "", + ), + ) + if err != nil { + return err + } + t.cache = cache + + t.streams = newStreamManager(t.client, t.cache, t.tezosconnectConf.GetUint(TezosconnectConfigBatchSize), uint(t.tezosconnectConf.GetDuration(TezosconnectConfigBatchTimeout).Milliseconds())) + + t.backgroundStart = t.tezosconnectConf.GetBool(TezosconnectBackgroundStart) + if t.backgroundStart { + t.backgroundRetry = &retry.Retry{ + InitialDelay: t.tezosconnectConf.GetDuration(TezosconnectBackgroundStartInitialDelay), + MaximumDelay: t.tezosconnectConf.GetDuration(TezosconnectBackgroundStartMaxDelay), + Factor: t.tezosconnectConf.GetFloat64(TezosconnectBackgroundStartFactor), + } + + return nil + } + + stream, err := t.streams.ensureEventStream(t.ctx, t.pluginTopic) + if err != nil { + return err + } + + t.streamID = stream.ID + log.L(t.ctx).Infof("Event stream: %s (pluginTopic=%s)", t.streamID, t.pluginTopic) + + t.closed = make(chan struct{}) + go t.eventLoop() + + return nil +} + +func (t *Tezos) SetHandler(namespace string, handler blockchain.Callbacks) { + t.callbacks.SetHandler(namespace, handler) +} + +func (t *Tezos) SetOperationHandler(namespace string, handler core.OperationCallbacks) { + t.callbacks.SetOperationalHandler(namespace, handler) +} + +func (t *Tezos) Start() (err error) { + if t.backgroundStart { + go t.startBackgroundLoop() + return nil + } + + return t.wsconn.Connect() +} + +func (t *Tezos) Capabilities() *blockchain.Capabilities { + return t.capabilities +} + +func (t *Tezos) AddFireflySubscription(ctx context.Context, namespace *core.Namespace, contract *blockchain.MultipartyContract) (string, error) { + tezosLocation, err := t.parseContractLocation(ctx, contract.Location) + if err != nil { + return "", err + } + + version, _ := t.GetNetworkVersion(ctx, 
contract.Location) + + sub, err := t.streams.ensureFireFlySubscription(ctx, namespace.Name, version, tezosLocation.Address, contract.FirstEvent, t.streamID, batchPinEvent) + if err != nil { + return "", err + } + + t.subs.AddSubscription(ctx, namespace, version, sub.ID, nil) + return sub.ID, nil +} + +func (t *Tezos) RemoveFireflySubscription(ctx context.Context, subID string) { + t.subs.RemoveSubscription(ctx, subID) +} + +func (t *Tezos) ResolveSigningKey(ctx context.Context, key string, intent blockchain.ResolveKeyIntent) (resolved string, err error) { + if !t.addressResolveAlways { + // If there's no address resolver plugin, or addressResolveAlways is false, + // we check if it's already an tezos address - in which case we can just return it. + resolved, err = formatTezosAddress(ctx, key) + } + if t.addressResolveAlways || (err != nil && t.addressResolver != nil) { + // Either it's not a valid tezos address, + // or we've been configured to invoke the address resolver on every call + resolved, err = t.addressResolver.ResolveSigningKey(ctx, key, intent) + if err == nil { + log.L(ctx).Infof("Key '%s' resolved to '%s'", key, resolved) + return resolved, nil + } + } + return resolved, err +} + +func (t *Tezos) SubmitBatchPin(ctx context.Context, nsOpID, networkNamespace, signingKey string, batch *blockchain.BatchPin, location *fftypes.JSONAny) error { + // TODO: impl + return nil +} + +func (t *Tezos) SubmitNetworkAction(ctx context.Context, nsOpID string, signingKey string, action core.NetworkActionType, location *fftypes.JSONAny) error { + // TODO: impl + return nil +} + +func (t *Tezos) DeployContract(ctx context.Context, nsOpID, signingKey string, definition, contract *fftypes.JSONAny, input []interface{}, options map[string]interface{}) (submissionRejected bool, err error) { + return true, i18n.NewError(ctx, coremsgs.MsgNotSupportedByBlockchainPlugin) +} + +func (t *Tezos) ValidateInvokeRequest(ctx context.Context, parsedMethod interface{}, input map[string]interface{}, hasMessage bool) error { + // No additional validation beyond what is enforced by Contract Manager + _, _, err := t.recoverFFI(ctx, parsedMethod) + return err +} + +func (t *Tezos) InvokeContract(ctx context.Context, nsOpID string, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}, batch *blockchain.BatchPin) (bool, error) { + tezosLocation, err := t.parseContractLocation(ctx, location) + if err != nil { + return true, err + } + + methodName, michelsonInput, err := t.prepareRequest(ctx, parsedMethod, input) + if err != nil { + return true, err + } + + // TODO: add batch pin support + + return t.invokeContractMethod(ctx, tezosLocation.Address, methodName, signingKey, nsOpID, michelsonInput, options) +} + +func (t *Tezos) QueryContract(ctx context.Context, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}) (interface{}, error) { + tezosLocation, err := t.parseContractLocation(ctx, location) + if err != nil { + return nil, err + } + + methodName, michelsonInput, err := t.prepareRequest(ctx, parsedMethod, input) + if err != nil { + return nil, err + } + + res, err := t.queryContractMethod(ctx, tezosLocation.Address, methodName, signingKey, michelsonInput, options) + if err != nil || !res.IsSuccess() { + return nil, err + } + + var output interface{} + if err = json.Unmarshal(res.Body(), &output); err != nil { + return nil, err + } + return output, nil +} + 
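+// A hedged usage sketch of the invoke/query flow above; "t" is the plugin instance, and the identifiers
+// ffiMethod, ffiErrors, inputArgs plus the address/key/operation-ID literals are hypothetical placeholders:
+//
+//	location := fftypes.JSONAnyPtr(`{"address":"KT1HypotheticalContract00000000000"}`)
+//	parsed, _ := t.ParseInterface(ctx, ffiMethod, ffiErrors)
+//	_, err := t.InvokeContract(ctx, "ns1:op1", "tz1HypotheticalSigner00000000000000", location, parsed, inputArgs, nil, nil)
+//
+// InvokeContract parses the location, converts the FFI inputs to Micheline via processArgs, and POSTs a
+// "SendTransaction" message to tezosconnect; QueryContract takes the same path with a "Query" message.
+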
+func (t *Tezos) ParseInterface(ctx context.Context, method *fftypes.FFIMethod, errors []*fftypes.FFIError) (interface{}, error) { + return &ffiMethodAndErrors{ + method: method, + errors: errors, + }, nil +} + +func (t *Tezos) NormalizeContractLocation(ctx context.Context, ntype blockchain.NormalizeType, location *fftypes.JSONAny) (result *fftypes.JSONAny, err error) { + parsed, err := t.parseContractLocation(ctx, location) + if err != nil { + return nil, err + } + return t.encodeContractLocation(ctx, parsed) +} + +func (t *Tezos) AddContractListener(ctx context.Context, listener *core.ContractListener) (err error) { + var location *Location + if listener.Location != nil { + location, err = t.parseContractLocation(ctx, listener.Location) + if err != nil { + return err + } + } + + subName := fmt.Sprintf("ff-sub-%s-%s", listener.Namespace, listener.ID) + firstEvent := string(core.SubOptsFirstEventNewest) + if listener.Options != nil { + firstEvent = listener.Options.FirstEvent + } + result, err := t.streams.createSubscription(ctx, location, t.streamID, subName, listener.Event.Name, firstEvent) + if err != nil { + return err + } + listener.BackendID = result.ID + return nil +} + +func (t *Tezos) DeleteContractListener(ctx context.Context, subscription *core.ContractListener, okNotFound bool) error { + return t.streams.deleteSubscription(ctx, subscription.BackendID, okNotFound) +} + +// Note: still under development; this approach may change. +func (t *Tezos) GetContractListenerStatus(ctx context.Context, subID string, okNotFound bool) (found bool, status interface{}, err error) { + sub, err := t.streams.getSubscription(ctx, subID, okNotFound) + if err != nil || sub == nil { + return false, nil, err + } + + checkpoint := &ListenerStatus{ + Catchup: sub.Catchup, + Checkpoint: ListenerCheckpoint{ + Block: sub.Checkpoint.Block, + TransactionBatchIndex: sub.Checkpoint.TransactionBatchIndex, + TransactionIndex: sub.Checkpoint.TransactionIndex, + MetaInternalResultIndex: sub.Checkpoint.MetaInternalResultIndex, + }, + } + + return true, checkpoint, nil +} + +func (t *Tezos) GetFFIParamValidator(ctx context.Context) (fftypes.FFIParamValidator, error) { + // Tezosconnect does not require any additional validation beyond "JSON Schema correctness" at this time + return nil, nil +} + +func (t *Tezos) GenerateEventSignature(ctx context.Context, event *fftypes.FFIEventDefinition) string { + return event.Name +} + +func (t *Tezos) GenerateErrorSignature(ctx context.Context, event *fftypes.FFIErrorDefinition) string { + // TODO: impl + return "" +} + +func (t *Tezos) GenerateFFI(ctx context.Context, generationRequest *fftypes.FFIGenerationRequest) (*fftypes.FFI, error) { + return nil, i18n.NewError(ctx, coremsgs.MsgFFIGenerationUnsupported) +} + +func (t *Tezos) GetNetworkVersion(ctx context.Context, location *fftypes.JSONAny) (version int, err error) { + // Part of FIR-12: https://github.com/hyperledger/firefly-fir/pull/12 + // Not applicable to Tezos, as its batch pin contract was created after the proposal. 
+ // TODO: get the network version from the batch pin contract + return 2, nil +} + +func (t *Tezos) GetAndConvertDeprecatedContractConfig(ctx context.Context) (location *fftypes.JSONAny, fromBlock string, err error) { + return nil, "", nil +} + +func (t *Tezos) GetTransactionStatus(ctx context.Context, operation *core.Operation) (interface{}, error) { + txnID := (&core.PreparedOperation{ID: operation.ID, Namespace: operation.Namespace}).NamespacedIDString() + + transactionRequestPath := fmt.Sprintf("/transactions/%s", txnID) + client := t.client + var resErr common.BlockchainRESTError + var statusResponse fftypes.JSONObject + res, err := client.R(). + SetContext(ctx). + SetError(&resErr). + SetResult(&statusResponse). + Get(transactionRequestPath) + if err != nil || !res.IsSuccess() { + if res.StatusCode() == 404 { + return nil, nil + } + return nil, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgTezosconnectRESTErr) + } + + receiptInfo := statusResponse.GetObject("receipt") + txStatus := statusResponse.GetString("status") + + if txStatus != "" { + var replyType string + if txStatus == "Succeeded" { + replyType = ReceiptTransactionSuccess + } else { + replyType = ReceiptTransactionFailed + } + // If the status has changed, mock up a blockchain receipt as if we'd received it + // as a web socket notification + if (operation.Status == core.OpStatusPending || operation.Status == core.OpStatusInitialized) && txStatus != tezosTxStatusPending { + receipt := &common.BlockchainReceiptNotification{ + Headers: common.BlockchainReceiptHeaders{ + ReceiptID: statusResponse.GetString("id"), + ReplyType: replyType, + }, + TxHash: statusResponse.GetString("transactionHash"), + Message: statusResponse.GetString("errorMessage"), + ProtocolID: receiptInfo.GetString("protocolId")} + err := common.HandleReceipt(ctx, t, receipt, t.callbacks) + if err != nil { + log.L(ctx).Warnf("Failed to handle receipt: %v", err) + } + } + } else { + // We don't expect to get here, so issue a warning + log.L(ctx).Warnf("Transaction status response did not include status information") + } + + return statusResponse, nil +} + +func (t *Tezos) afterConnect(ctx context.Context, w wsclient.WSClient) error { + // Send a subscribe to our topic after each connect/reconnect + b, _ := json.Marshal(&tezosWSCommandPayload{ + Type: "listen", + Topic: t.pluginTopic, + }) + err := w.Send(ctx, b) + if err == nil { + b, _ = json.Marshal(&tezosWSCommandPayload{ + Type: "listenreplies", + }) + err = w.Send(ctx, b) + } + return err +} + +func (t *Tezos) recoverFFI(ctx context.Context, parsedMethod interface{}) (*fftypes.FFIMethod, []*fftypes.FFIError, error) { + methodInfo, ok := parsedMethod.(*ffiMethodAndErrors) + if !ok || methodInfo.method == nil { + return nil, nil, i18n.NewError(ctx, coremsgs.MsgUnexpectedInterfaceType, parsedMethod) + } + return methodInfo.method, methodInfo.errors, nil +} + +func (t *Tezos) invokeContractMethod(ctx context.Context, address, methodName, signingKey, requestID string, michelsonInput micheline.Parameters, options map[string]interface{}) (submissionRejected bool, err error) { + if t.metrics.IsMetricsEnabled() { + t.metrics.BlockchainTransaction(address, methodName) + } + messageType := "SendTransaction" + body, err := t.buildTezosconnectRequestBody(ctx, messageType, address, methodName, signingKey, requestID, michelsonInput, options) + if err != nil { + return true, err + } + + var resErr common.BlockchainRESTError + res, err := t.client.R(). + SetContext(ctx). + SetBody(body). + SetError(&resErr). 
+ Post("/") + if err != nil || !res.IsSuccess() { + return resErr.SubmissionRejected, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgTezosconnectRESTErr) + } + return false, nil +} + +func (t *Tezos) queryContractMethod(ctx context.Context, address, methodName, signingKey string, michelsonInput micheline.Parameters, options map[string]interface{}) (*resty.Response, error) { + if t.metrics.IsMetricsEnabled() { + t.metrics.BlockchainQuery(address, methodName) + } + messageType := "Query" + body, err := t.buildTezosconnectRequestBody(ctx, messageType, address, methodName, signingKey, "", michelsonInput, options) + if err != nil { + return nil, err + } + + var resErr common.BlockchainRESTError + res, err := t.client.R(). + SetContext(ctx). + SetBody(body). + SetError(&resErr). + Post("/") + if err != nil || !res.IsSuccess() { + return res, common.WrapRESTError(ctx, &resErr, res, err, coremsgs.MsgTezosconnectRESTErr) + } + return res, nil +} + +func (t *Tezos) buildTezosconnectRequestBody(ctx context.Context, messageType, address, methodName, signingKey, requestID string, michelsonInput micheline.Parameters, options map[string]interface{}) (map[string]interface{}, error) { + headers := TezosconnectMessageHeaders{ + Type: messageType, + } + if requestID != "" { + headers.ID = requestID + } + + body := map[string]interface{}{ + "headers": &headers, + "to": address, + "method": methodName, + "params": []interface{}{michelsonInput}, + } + if signingKey != "" { + body["from"] = signingKey + } + + return t.applyOptions(ctx, body, options) +} + +func (t *Tezos) applyOptions(ctx context.Context, body, options map[string]interface{}) (map[string]interface{}, error) { + for k, v := range options { + // Set the new field if it's not already set. Do not allow overriding of existing fields + if _, ok := body[k]; !ok { + body[k] = v + } else { + return nil, i18n.NewError(ctx, coremsgs.MsgOverrideExistingFieldCustomOption, k) + } + } + return body, nil +} + +func (t *Tezos) prepareRequest(ctx context.Context, parsedMethod interface{}, input map[string]interface{}) (string, micheline.Parameters, error) { + method, _, err := t.recoverFFI(ctx, parsedMethod) + if err != nil { + return "", micheline.Parameters{}, err + } + + // Build the payload schema for the method parameters + prefixItems := make([]*PrefixItem, len(method.Params)) + for i, param := range method.Params { + var paramSchema ffiParamSchema + if err := json.Unmarshal(param.Schema.Bytes(), ¶mSchema); err != nil { + return "", micheline.Parameters{}, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, fmt.Sprintf("%s.schema", param.Name)) + } + + prefixItems[i] = &PrefixItem{ + Name: param.Name, + Type: paramSchema.Type, + Details: paramSchema.Details, + } + } + + payloadSchema := &PayloadSchema{ + Type: "array", + PrefixItems: prefixItems, + } + + schemaBytes, _ := json.Marshal(payloadSchema) + var processSchemaReq map[string]interface{} + _ = json.Unmarshal(schemaBytes, &processSchemaReq) + + michelineInput, err := processArgs(processSchemaReq, input, method.Name) + + return method.Name, michelineInput, err +} + +func (t *Tezos) parseContractLocation(ctx context.Context, location *fftypes.JSONAny) (*Location, error) { + tezosLocation := Location{} + if err := json.Unmarshal(location.Bytes(), &tezosLocation); err != nil { + return nil, i18n.NewError(ctx, coremsgs.MsgContractLocationInvalid, err) + } + if tezosLocation.Address == "" { + return nil, i18n.NewError(ctx, coremsgs.MsgContractLocationInvalid, "'address' not set") + } + return 
&tezosLocation, nil +} + +func (t *Tezos) encodeContractLocation(ctx context.Context, location *Location) (result *fftypes.JSONAny, err error) { + location.Address, err = formatTezosAddress(ctx, location.Address) + if err != nil { + return nil, err + } + normalized, err := json.Marshal(location) + if err == nil { + result = fftypes.JSONAnyPtrBytes(normalized) + } + return result, err +} + +func (t *Tezos) startBackgroundLoop() { + _ = t.backgroundRetry.Do(t.ctx, fmt.Sprintf("tezos connector %s", t.Name()), func(attempt int) (retry bool, err error) { + stream, err := t.streams.ensureEventStream(t.ctx, t.pluginTopic) + if err != nil { + return true, err + } + + t.streamID = stream.ID + log.L(t.ctx).Infof("Event stream: %s (topic=%s)", t.streamID, t.pluginTopic) + + err = t.wsconn.Connect() + if err != nil { + return true, err + } + + t.closed = make(chan struct{}) + go t.eventLoop() + + return false, nil + }) +} + +func (t *Tezos) eventLoop() { + defer t.wsconn.Close() + defer close(t.closed) + l := log.L(t.ctx).WithField("role", "event-loop") + ctx := log.WithLogger(t.ctx, l) + for { + select { + case <-ctx.Done(): + l.Debugf("Event loop exiting (context cancelled)") + return + case msgBytes, ok := <-t.wsconn.Receive(): + if !ok { + l.Debugf("Event loop exiting (receive channel closed). Terminating server!") + t.cancelCtx() + return + } + + var msgParsed interface{} + err := json.Unmarshal(msgBytes, &msgParsed) + if err != nil { + l.Errorf("Message cannot be parsed as JSON: %s\n%s", err, string(msgBytes)) + continue // Swallow this and move on + } + switch msgTyped := msgParsed.(type) { + case []interface{}: + err = t.handleMessageBatch(ctx, 0, msgTyped) + if err == nil { + ack, _ := json.Marshal(&tezosWSCommandPayload{ + Type: "ack", + Topic: t.pluginTopic, + }) + err = t.wsconn.Send(ctx, ack) + } + case map[string]interface{}: + var receipt common.BlockchainReceiptNotification + _ = json.Unmarshal(msgBytes, &receipt) + + err := common.HandleReceipt(ctx, t, &receipt, t.callbacks) + if err != nil { + l.Errorf("Failed to process receipt: %+v", msgTyped) + } + default: + l.Errorf("Message unexpected: %+v", msgTyped) + continue + } + + if err != nil { + l.Errorf("Event loop exiting (%s). Terminating server!", err) + t.cancelCtx() + return + } + } + } +} + +func (t *Tezos) handleMessageBatch(ctx context.Context, batchID int64, messages []interface{}) error { + // TODO: + return nil +} + +func formatTezosAddress(ctx context.Context, key string) (string, error) { + if addressVerify.MatchString(key) { + return key, nil + } + return "", i18n.NewError(ctx, coremsgs.MsgInvalidTezosAddress) +} diff --git a/internal/blockchain/tezos/tezos_test.go b/internal/blockchain/tezos/tezos_test.go new file mode 100644 index 0000000000..4f0ec8f960 --- /dev/null +++ b/internal/blockchain/tezos/tezos_test.go @@ -0,0 +1,1884 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tezos + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "testing" + "time" + + "github.com/go-resty/resty/v2" + "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/ffresty" + "github.com/hyperledger/firefly-common/pkg/fftls" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly-common/pkg/retry" + "github.com/hyperledger/firefly-common/pkg/wsclient" + "github.com/hyperledger/firefly/internal/blockchain/common" + "github.com/hyperledger/firefly/internal/cache" + "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/mocks/blockchainmocks" + "github.com/hyperledger/firefly/mocks/cachemocks" + "github.com/hyperledger/firefly/mocks/coremocks" + "github.com/hyperledger/firefly/mocks/metricsmocks" + "github.com/hyperledger/firefly/mocks/wsmocks" + "github.com/hyperledger/firefly/pkg/blockchain" + "github.com/hyperledger/firefly/pkg/core" + "github.com/jarcoal/httpmock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var utConfig = config.RootSection("tezos_unit_tests") +var utTezosconnectConf = utConfig.SubSection(TezosconnectConfigKey) +var utAddressResolverConf = utConfig.SubSection(AddressResolverConfigKey) + +func testFFIMethod() *fftypes.FFIMethod { + return &fftypes.FFIMethod{ + Name: "testFunc", + Params: []*fftypes.FFIParam{ + { + Name: "varNat", + Schema: fftypes.JSONAnyPtr(`{"type": "integer", "details":{"type":"integer","internalType":"nat"}}`), + }, + { + Name: "varInt", + Schema: fftypes.JSONAnyPtr(`{"type": "integer", "details":{"type":"integer","internalType":"integer"}}`), + }, + { + Name: "varString", + Schema: fftypes.JSONAnyPtr(`{"type": "string", "details":{"type":"string","internalType":"string"}}`), + }, + { + Name: "varStringOpt", + Schema: fftypes.JSONAnyPtr(`{"type": "string", "details":{"type":"string","internalType":"string","kind": "option"}}`), + }, + { + Name: "varBytes", + Schema: fftypes.JSONAnyPtr(`{"type": "string", "details":{"type":"bytes","internalType":"bytes"}}`), + }, + { + Name: "varBool", + Schema: fftypes.JSONAnyPtr(`{"type": "boolean", "details":{"type":"boolean","internalType":"boolean"}}`), + }, + { + Name: "varAddress", + Schema: fftypes.JSONAnyPtr(`{"type": "string", "details":{"type":"address","internalType":"address"}}`), + }, + }, + } +} + +func resetConf(t *Tezos) { + coreconfig.Reset() + t.InitConfig(utConfig) +} + +func newTestTezos() (*Tezos, func()) { + ctx, cancel := context.WithCancel(context.Background()) + wsm := &wsmocks.WSClient{} + mm := &metricsmocks.Manager{} + mm.On("IsMetricsEnabled").Return(true) + mm.On("BlockchainTransaction", mock.Anything, mock.Anything).Return(nil) + mm.On("BlockchainQuery", mock.Anything, mock.Anything).Return(nil) + t := &Tezos{ + ctx: ctx, + cancelCtx: cancel, + client: resty.New().SetBaseURL("http://localhost:12345"), + pluginTopic: "topic1", + prefixShort: defaultPrefixShort, + prefixLong: defaultPrefixLong, + wsconn: wsm, + metrics: mm, + cache: cache.NewUmanagedCache(ctx, 100, 5*time.Minute), + callbacks: common.NewBlockchainCallbacks(), + subs: common.NewFireflySubscriptions(), + } + return t, func() { + cancel() + if t.closed != nil { + // We've init'd, wait to close + <-t.closed + } + } +} + +func newTestStreamManager(client *resty.Client) *streamManager { + return newStreamManager(client, cache.NewUmanagedCache(context.Background(), 100, 5*time.Minute), defaultBatchSize, defaultBatchTimeout) +} + +func 
TestInitMissingURL(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.Regexp(t, "FF10138.*url", err) +} + +func TestBadTLSConfig(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + + tlsConf := utTezosconnectConf.SubSection("tls") + tlsConf.Set(fftls.HTTPConfTLSEnabled, true) + tlsConf.Set(fftls.HTTPConfTLSCAFile, "!!!!!badness") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.Regexp(t, "FF00153", err) +} + +func TestInitBadAddressResolver(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + resetConf(tz) + utAddressResolverConf.Set(AddressResolverURLTemplate, "{{unclosed}") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.Regexp(t, "FF10337.*urlTemplate", err) +} + +func TestInitMissingTopic(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.Regexp(t, "FF10138.*topic", err) +} + +func TestInitAndStartWithTezosConnect(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + toServer, fromServer, wsURL, done := wsclient.NewTestWSServer(nil) + defer done() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + u, _ := url.Parse(wsURL) + u.Scheme = "http" + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.NoError(t, err) + + assert.Equal(t, "tezos", tz.Name()) + assert.Equal(t, core.VerifierTypeTezosAddress, tz.VerifierType()) + + assert.NoError(t, err) + + assert.Equal(t, 2, httpmock.GetTotalCallCount()) + assert.Equal(t, "es12345", tz.streamID) + assert.NotNil(t, tz.Capabilities()) + + err = tz.Start() + assert.NoError(t, err) + + startupMessage := <-toServer + assert.Equal(t, `{"type":"listen","topic":"topic1"}`, startupMessage) + startupMessage = <-toServer + assert.Equal(t, `{"type":"listenreplies"}`, startupMessage) + fromServer <- `{"bad":"receipt"}` // will be ignored - no ack + fromServer <- `[]` // empty batch, will be ignored, but acked + reply := <-toServer + assert.Equal(t, 
`{"type":"ack","topic":"topic1"}`, reply) + fromServer <- `[{}]` // bad batch + + // Bad data will be ignored + fromServer <- `!json` + fromServer <- `{"not": "a reply"}` + fromServer <- `42` +} + +func TestBackgroundStart(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + toServer, fromServer, wsURL, done := wsclient.NewTestWSServer(nil) + defer done() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + u, _ := url.Parse(wsURL) + u.Scheme = "http" + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + utTezosconnectConf.Set(TezosconnectBackgroundStart, true) + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.NoError(t, err) + + assert.Equal(t, "tezos", tz.Name()) + assert.Equal(t, core.VerifierTypeTezosAddress, tz.VerifierType()) + + assert.NoError(t, err) + + assert.NotNil(t, tz.Capabilities()) + + err = tz.Start() + assert.NoError(t, err) + + assert.Eventually(t, func() bool { return httpmock.GetTotalCallCount() == 2 }, time.Second*5, time.Microsecond) + assert.Eventually(t, func() bool { return tz.streamID == "es12345" }, time.Second*5, time.Microsecond) + + startupMessage := <-toServer + assert.Equal(t, `{"type":"listen","topic":"topic1"}`, startupMessage) + startupMessage = <-toServer + assert.Equal(t, `{"type":"listenreplies"}`, startupMessage) + fromServer <- `[]` // empty batch, will be ignored, but acked + reply := <-toServer + assert.Equal(t, `{"type":"ack","topic":"topic1"}`, reply) + + // Bad data will be ignored + fromServer <- `!json` + fromServer <- `{"not": "a reply"}` + fromServer <- `42` +} + +func TestBackgroundStartFail(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + _, _, wsURL, done := wsclient.NewTestWSServer(nil) + defer done() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + u, _ := url.Parse(wsURL) + u.Scheme = "http" + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(500, "Failed to get eventstreams")) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + utTezosconnectConf.Set(TezosconnectBackgroundStart, true) + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.NoError(t, err) + + assert.Equal(t, "tezos", tz.Name()) + assert.Equal(t, core.VerifierTypeTezosAddress, tz.VerifierType()) + + assert.NoError(t, err) + + err = tz.Start() + assert.NoError(t, err) + + capturedErr := make(chan error) + tz.backgroundRetry = &retry.Retry{ + ErrCallback: func(err error) { + capturedErr <- err + }, + } + + err = 
tz.Start() + assert.NoError(t, err) + + err = <-capturedErr + assert.Regexp(t, "FF10283", err) +} + +func TestBackgroundStartWSFail(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + u, _ := url.Parse("http://localhost:12345") + u.Scheme = "http" + httpURL := u.String() + + httpmock.RegisterResponder("GET", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", fmt.Sprintf("%s/eventstreams", httpURL), + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, httpURL) + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + utTezosconnectConf.Set(TezosconnectBackgroundStart, true) + utTezosconnectConf.Set(wsclient.WSConfigKeyInitialConnectAttempts, 1) + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + originalContext := tz.ctx + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, &metricsmocks.Manager{}, cmi) + cmi.AssertCalled(t, "GetCache", cache.NewCacheConfig( + originalContext, + coreconfig.CacheBlockchainLimit, + coreconfig.CacheBlockchainTTL, + "", + )) + assert.NoError(t, err) + + capturedErr := make(chan error) + tz.backgroundRetry = &retry.Retry{ + ErrCallback: func(err error) { + capturedErr <- err + }, + } + + err = tz.Start() + assert.NoError(t, err) + + err = <-capturedErr + assert.Regexp(t, "FF00148", err) +} + +func TestWSInitFail(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "!!!://") + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.Regexp(t, "FF00149", err) +} + +func TestTezosCacheInitFail(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cacheInitError := errors.New("Initialization error.") + cmi.On("GetCache", mock.Anything).Return(nil, cacheInitError) + + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.Equal(t, cacheInitError, err) +} + +func TestStreamQueryError(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewStringResponder(500, `pop`)) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPConfigRetryEnabled, false) + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 
100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.Regexp(t, "FF10283.*pop", err) +} + +func TestStreamCreateError(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", "http://localhost:12345/eventstreams", + httpmock.NewStringResponder(500, `pop`)) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPConfigRetryEnabled, false) + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.Regexp(t, "FF10283.*pop", err) +} + +func TestStreamUpdateError(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, []eventStream{{ID: "es12345", Name: "topic1"}})) + httpmock.RegisterResponder("PATCH", "http://localhost:12345/eventstreams/es12345", + httpmock.NewStringResponder(500, `pop`)) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPConfigRetryEnabled, false) + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.Regexp(t, "FF10283.*pop", err) +} + +func TestInitAllExistingStreams(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, []eventStream{{ID: "es12345", Name: "topic1"}})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(200, []subscription{ + {ID: "sub12345", Stream: "es12345", Name: "ns1_BatchPin_4b5431436f737675"}, + })) + httpmock.RegisterResponder("PATCH", "http://localhost:12345/eventstreams/es12345", + httpmock.NewJsonResponderOrPanic(200, &eventStream{ID: "es12345", Name: "topic1"})) + httpmock.RegisterResponder("POST", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(200, subscription{})) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + location := fftypes.JSONAnyPtr(fftypes.JSONObject{ + "address": "KT1CosvuPHD6YnY4uYNguJj6m58UuHJWyS1u", + }.String()) + contract := &blockchain.MultipartyContract{ + Location: location, + FirstEvent: "oldest", + } + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", 
mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.NoError(t, err) + + ns := &core.Namespace{Name: "ns1", NetworkName: "ns1"} + _, err = tz.AddFireflySubscription(tz.ctx, ns, contract) + assert.NoError(t, err) + + assert.Equal(t, 3, httpmock.GetTotalCallCount()) + assert.Equal(t, "es12345", tz.streamID) +} + +func TestVerifyTezosAddress(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + _, err := tz.ResolveSigningKey(context.Background(), "tz1err", blockchain.ResolveKeyIntentSign) + assert.Regexp(t, "FF10142", err) + + key, err := tz.ResolveSigningKey(context.Background(), "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", blockchain.ResolveKeyIntentSign) + assert.NoError(t, err) + assert.Equal(t, "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", key) +} + +func TestEventLoopContextCancelled(t *testing.T) { + tz, cancel := newTestTezos() + cancel() + r := make(<-chan []byte) + wsm := tz.wsconn.(*wsmocks.WSClient) + wsm.On("Receive").Return(r) + wsm.On("Close").Return() + tz.closed = make(chan struct{}) + tz.eventLoop() + wsm.AssertExpectations(t) +} + +func TestEventLoopReceiveClosed(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + r := make(chan []byte) + wsm := tz.wsconn.(*wsmocks.WSClient) + close(r) + wsm.On("Receive").Return((<-chan []byte)(r)) + wsm.On("Close").Return() + tz.closed = make(chan struct{}) + tz.eventLoop() + wsm.AssertExpectations(t) +} + +func TestEventLoopSendClosed(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + s := make(chan []byte, 1) + s <- []byte(`[]`) + r := make(chan []byte) + wsm := tz.wsconn.(*wsmocks.WSClient) + wsm.On("Receive").Return((<-chan []byte)(s)) + wsm.On("Send", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + go cancel() + close(r) + }).Return(fmt.Errorf("pop")) + wsm.On("Close").Return() + tz.closed = make(chan struct{}) + tz.eventLoop() + wsm.AssertExpectations(t) +} + +func TestEventLoopUnexpectedMessage(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + r := make(chan []byte) + wsm := tz.wsconn.(*wsmocks.WSClient) + wsm.On("Receive").Return((<-chan []byte)(r)) + wsm.On("Close").Return() + tz.closed = make(chan struct{}) + operationID := fftypes.NewUUID() + data := []byte(`{ + "_id": "6fb94fff-81d3-4094-567d-e031b1871694", + "errorMessage": "Packing arguments for method 'broadcastBatch': cannot use [3]uint8 as type [32]uint8 as argument", + "headers": { + "id": "3a37b17b-13b6-4dc5-647a-07c11eae0be3", + "requestId": "ns1:` + operationID.String() + `", + "requestOffset": "zzn4y4v4si-zzjjepe9x4-requests:0:0", + "timeElapsed": 0.020969053, + "timeReceived": "2021-05-31T02:35:11.458880504Z", + "type": "Error" + }, + "receivedAt": 1622428511616 + }`) + em := &blockchainmocks.Callbacks{} + tz.SetHandler("ns1", em) + txsu := em.On("BlockchainOpUpdate", + tz, + "ns1:"+operationID.String(), + core.OpStatusFailed, + "", + "Packing arguments for method 'broadcastBatch': cannot use [3]uint8 as type [32]uint8 as argument", + mock.Anything).Return(fmt.Errorf("Shutdown")) + done := make(chan struct{}) + txsu.RunFn = func(a mock.Arguments) { + close(done) + } + + go tz.eventLoop() + r <- []byte(`!badjson`) // ignored bad json + r <- []byte(`"not an object"`) // ignored wrong type + r <- data + tz.ctx.Done() +} + +func TestHandleReceiptTXSuccess(t *testing.T) { + tm := &coremocks.OperationCallbacks{} + wsm := &wsmocks.WSClient{} + tz := &Tezos{ + ctx: context.Background(), + pluginTopic: 
"topic1", + callbacks: common.NewBlockchainCallbacks(), + wsconn: wsm, + } + tz.SetOperationHandler("ns1", tm) + + var reply common.BlockchainReceiptNotification + operationID := fftypes.NewUUID() + data := fftypes.JSONAnyPtr(`{ + "headers": { + "requestId": "ns1:` + operationID.String() + `", + "type": "TransactionSuccess" + }, + "status": "Succeeded", + "protocolId": "PtNairobiyssHuh87hEhfVBGCVrK3WnS8Z2FT4ymB5tAa4r1nQf", + "transactionHash": "ooGcrcazgcGBrY1iym329ovV13MnWrTmV1fttCwWKH5DiYUQsiq", + "contractLocation": { + "address": "KT1CosvuPHD6YnY4uYNguJj6m58UuHJWyS1u" + } + }`) + + tm.On("OperationUpdate", mock.MatchedBy(func(update *core.OperationUpdate) bool { + return update.NamespacedOpID == "ns1:"+operationID.String() && + update.Status == core.OpStatusSucceeded && + update.BlockchainTXID == "ooGcrcazgcGBrY1iym329ovV13MnWrTmV1fttCwWKH5DiYUQsiq" && + update.Plugin == "tezos" + })).Return(nil) + + err := json.Unmarshal(data.Bytes(), &reply) + assert.NoError(t, err) + + common.HandleReceipt(context.Background(), tz, &reply, tz.callbacks) + + tm.AssertExpectations(t) +} + +func TestHandleReceiptTXUpdateTezosConnect(t *testing.T) { + tm := &coremocks.OperationCallbacks{} + wsm := &wsmocks.WSClient{} + tz := &Tezos{ + ctx: context.Background(), + pluginTopic: "topic1", + callbacks: common.NewBlockchainCallbacks(), + wsconn: wsm, + } + tz.SetOperationHandler("ns1", tm) + + var reply common.BlockchainReceiptNotification + operationID := fftypes.NewUUID() + data := fftypes.JSONAnyPtr(`{ + "created": "2023-09-10T14:49:31.147376Z", + "firstSubmit": "2023-09-10T14:49:31.79751Z", + "from": "tz1eXM1uGi5THR7Aj8VnkteA5nrBmPyKAufM", + "gasPrice": 0, + "headers": { + "requestId": "ns1:` + operationID.String() + `", + "type": "TransactionUpdate" + }, + "id": "ns1:` + operationID.String() + `", + "lastSubmit": "2023-09-10T14:49:31.79751Z", + "nonce": "1", + "policyInfo": {}, + "receipt": { + "blockHash": "BKp3gNDyJygbAKNmdJnmJjLX6y6BrA2dcJVXJ3zMfkX8gA63rH3", + "blockNumber": "3835591", + "contractLocation": { + "address": "KT1CosvuPHD6YnY4uYNguJj6m58UuHJWyS1u" + }, + "extraInfo": { + "consumedGas": "387", + "contractAddress": "KT1CosvuPHD6YnY4uYNguJj6m58UuHJWyS1u", + "counter": "18602182", + "errorMessage": null, + "fee": "313", + "from": "tz1eXM1uGi5THR7Aj8VnkteA5nrBmPyKAufM", + "gasLimit": "487", + "paidStorageSizeDiff": "0", + "status": "applied", + "storage": { + "admin": "tz1eXM1uGi5THR7Aj8VnkteA5nrBmPyKAufM", + "destroyed": false, + "last_token_id": "1", + "paused": false + }, + "storageLimit": "0", + "storageSize": "10380", + "to": "KT1CosvuPHD6YnY4uYNguJj6m58UuHJWyS1u" + }, + "protocolId": "PtNairobiyssHuh87hEhfVBGCVrK3WnS8Z2FT4ymB5tAa4r1nQf", + "success": true, + "transactionIndex": "0" + }, + "sequenceId": "018a7f91-b90b-fb45-9f7d-0956b3280c1d", + "status": "Succeeded", + "to": "KT1CosvuPHD6YnY4uYNguJj6m58UuHJWyS1u", + "transactionData": "ff0003d4072e3ece4cbbda5b3f8b0c5b6567520ad5eab21d71ff27595f735cba6c00cf26c62d8a29ab128972a52c46bb6099a2d3675900c6b1ef08000000012e5b393218d67f74660c2cedec8e8bcfa9607d8100ffff057061757365000000020303", + "transactionHash": "onhZJDmz5JihnW1RaZ96f17FgUBv3GoERkRECK3XVFt1kL5E6Yy", + "transactionHeaders": { + "from": "tz1eXM1uGi5THR7Aj8VnkteA5nrBmPyKAufM", + "nonce": "1", + "to": "KT1CosvuPHD6YnY4uYNguJj6m58UuHJWyS1u" + }, + "updated": "2023-09-10T14:49:36.030604Z" + }`) + + tm.On("OperationUpdate", mock.MatchedBy(func(update *core.OperationUpdate) bool { + return update.NamespacedOpID == "ns1:"+operationID.String() && + update.Status == core.OpStatusPending && + 
update.BlockchainTXID == "onhZJDmz5JihnW1RaZ96f17FgUBv3GoERkRECK3XVFt1kL5E6Yy" && + update.Plugin == "tezos" + })).Return(nil) + + err := json.Unmarshal(data.Bytes(), &reply) + assert.NoError(t, err) + expectedReceiptId := "ns1:" + operationID.String() + assert.Equal(t, reply.Headers.ReceiptID, expectedReceiptId) + common.HandleReceipt(context.Background(), tz, &reply, tz.callbacks) + + tm.AssertExpectations(t) +} + +func TestHandleMsgBatchBadData(t *testing.T) { + wsm := &wsmocks.WSClient{} + tz := &Tezos{ + ctx: context.Background(), + pluginTopic: "topic1", + wsconn: wsm, + } + + var reply common.BlockchainReceiptNotification + data := fftypes.JSONAnyPtr(`{}`) + err := json.Unmarshal(data.Bytes(), &reply) + assert.NoError(t, err) + common.HandleReceipt(context.Background(), tz, &reply, tz.callbacks) +} + +func TestAddSubscription(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + tz.streamID = "es-1" + tz.streams = &streamManager{ + client: tz.client, + } + + sub := &core.ContractListener{ + Location: fftypes.JSONAnyPtr(fftypes.JSONObject{ + "address": "KT123", + }.String()), + Event: &core.FFISerializedEvent{ + FFIEventDefinition: fftypes.FFIEventDefinition{ + Name: "Changed", + }, + }, + Options: &core.ContractListenerOptions{ + FirstEvent: string(core.SubOptsFirstEventOldest), + }, + } + + httpmock.RegisterResponder("POST", `http://localhost:12345/subscriptions`, + httpmock.NewJsonResponderOrPanic(200, &subscription{})) + + err := tz.AddContractListener(context.Background(), sub) + + assert.NoError(t, err) +} + +func TestAddSubscriptionWithoutLocation(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + tz.streamID = "es-1" + tz.streams = &streamManager{ + client: tz.client, + } + + sub := &core.ContractListener{ + Event: &core.FFISerializedEvent{ + FFIEventDefinition: fftypes.FFIEventDefinition{ + Name: "Changed", + }, + }, + Options: &core.ContractListenerOptions{ + FirstEvent: string(core.SubOptsFirstEventNewest), + }, + } + + httpmock.RegisterResponder("POST", `http://localhost:12345/subscriptions`, + httpmock.NewJsonResponderOrPanic(200, &subscription{})) + + err := tz.AddContractListener(context.Background(), sub) + + assert.NoError(t, err) +} + +func TestAddSubscriptionBadLocation(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + tz.streamID = "es-1" + tz.streams = &streamManager{ + client: tz.client, + } + + sub := &core.ContractListener{ + Location: fftypes.JSONAnyPtr(""), + Event: &core.FFISerializedEvent{}, + } + + err := tz.AddContractListener(context.Background(), sub) + assert.Regexp(t, "FF10310", err) +} + +func TestAddSubscriptionFail(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + tz.streamID = "es-1" + tz.streams = &streamManager{ + client: tz.client, + } + + sub := &core.ContractListener{ + Location: fftypes.JSONAnyPtr(fftypes.JSONObject{ + "address": "KT123", + }.String()), + Event: &core.FFISerializedEvent{}, + Options: &core.ContractListenerOptions{ + FirstEvent: string(core.SubOptsFirstEventNewest), + }, + } + + httpmock.RegisterResponder("POST", `http://localhost:12345/subscriptions`, + httpmock.NewStringResponder(500, "pop")) + + err := 
tz.AddContractListener(context.Background(), sub) + + assert.Regexp(t, "FF10283.*pop", err) +} + +func TestDeleteSubscription(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + tz.streamID = "es-1" + tz.streams = &streamManager{ + client: tz.client, + } + + sub := &core.ContractListener{ + BackendID: "sb-1", + } + + httpmock.RegisterResponder("DELETE", `http://localhost:12345/subscriptions/sb-1`, + httpmock.NewStringResponder(204, "")) + + err := tz.DeleteContractListener(context.Background(), sub, true) + assert.NoError(t, err) +} + +func TestDeleteSubscriptionFail(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + tz.streamID = "es-1" + tz.streams = &streamManager{ + client: tz.client, + } + + sub := &core.ContractListener{ + BackendID: "sb-1", + } + + httpmock.RegisterResponder("DELETE", `http://localhost:12345/subscriptions/sb-1`, + httpmock.NewStringResponder(500, "")) + + err := tz.DeleteContractListener(context.Background(), sub, true) + assert.Regexp(t, "FF10283", err) +} + +func TestDeleteSubscriptionNotFound(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + tz.streamID = "es-1" + tz.streams = &streamManager{ + client: tz.client, + } + + sub := &core.ContractListener{ + BackendID: "sb-1", + } + + httpmock.RegisterResponder("DELETE", `http://localhost:12345/subscriptions/sb-1`, + httpmock.NewStringResponder(404, "")) + + err := tz.DeleteContractListener(context.Background(), sub, true) + assert.NoError(t, err) +} + +func TestDeployContractOK(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + signingKey := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + options := map[string]interface{}{} + input := []interface{}{} + definitionBytes, err := json.Marshal([]interface{}{}) + contractBytes, err := json.Marshal("KT123") + assert.NoError(t, err) + + _, err = tz.DeployContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(definitionBytes), fftypes.JSONAnyPtrBytes(contractBytes), input, options) + assert.Regexp(t, "FF10429", err) +} + +func TestInvokeContractOK(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + signingKey := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{ + "customOption": "customValue", + } + method := testFFIMethod() + params := map[string]interface{}{ + "varNat": float64(1), + "varInt": float64(2), + "varString": "str", + "varStringOpt": "optional str", + "varBytes": "0xAA", + "varBool": true, + "varAddress": "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + var body map[string]interface{} + json.NewDecoder(req.Body).Decode(&body) + params := body["params"].([]interface{}) + michelineParams := params[0].(map[string]interface{}) + headers := body["headers"].(map[string]interface{}) + assert.Equal(t, "SendTransaction", headers["type"]) + assert.Equal(t, "testFunc", 
michelineParams["entrypoint"].(string)) + assert.Equal(t, 7, len(michelineParams["value"].([]interface{}))) + assert.Equal(t, body["customOption"].(string), "customValue") + return httpmock.NewJsonResponderOrPanic(200, "")(req) + }) + + parsedMethod, err := tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + _, err = tz.InvokeContract(context.Background(), "opID", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) + assert.NoError(t, err) +} + +func TestInvokeContractInvalidOption(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + signingKey := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{ + "params": "shouldn't be allowed", + } + method := testFFIMethod() + params := map[string]interface{}{ + "varNat": float64(1), + "varInt": float64(2), + "varString": "str", + "varStringOpt": "optional str", + "varBytes": "0xAA", + "varBool": true, + "varAddress": "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + var body map[string]interface{} + json.NewDecoder(req.Body).Decode(&body) + params := body["params"].([]interface{}) + michelineParams := params[0].(map[string]interface{}) + headers := body["headers"].(map[string]interface{}) + assert.Equal(t, "SendTransaction", headers["type"]) + assert.Equal(t, "testFunc", michelineParams["entrypoint"].(string)) + assert.Equal(t, 7, len(michelineParams["value"].([]interface{}))) + assert.Equal(t, body["customOption"].(string), "customValue") + return httpmock.NewJsonResponderOrPanic(200, "")(req) + }) + + parsedMethod, err := tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + _, err = tz.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) + assert.Regexp(t, "FF10398", err) +} + +func TestInvokeContractBadSchema(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + signingKey := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{} + method := &fftypes.FFIMethod{ + Name: "sum", + Params: []*fftypes.FFIParam{ + { + Name: "varInt", + Schema: fftypes.JSONAnyPtr(`{not json]`), + }, + }, + Returns: []*fftypes.FFIParam{}, + } + params := map[string]interface{}{ + "varInt": float64(2), + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + parsedMethod, err := tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + _, err = tz.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) + assert.Regexp(t, "FF00127", err) +} + +func TestInvokeContractAddressNotSet(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + signingKey := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + location := &Location{} + options := map[string]interface{}{} + method := testFFIMethod() + params := map[string]interface{}{ + "varNat": float64(1), + "varInt": 
float64(2), + "varString": "str", + "varStringOpt": "optional str", + "varBytes": "0xAA", + "varBool": true, + "varAddress": "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + parsedMethod, err := tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + _, err = tz.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) + assert.Regexp(t, "'address' not set", err) +} + +func TestInvokeContractTezosconnectError(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + signingKey := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{} + method := testFFIMethod() + params := map[string]interface{}{ + "varNat": float64(1), + "varInt": float64(2), + "varString": "str", + "varStringOpt": "optional str", + "varBytes": "0xAA", + "varBool": true, + "varAddress": "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponderOrPanic(400, "")(req) + }) + + parsedMethod, err := tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + _, err = tz.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options, nil) + assert.Regexp(t, "FF10283", err) +} + +func TestInvokeContractPrepareFail(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + signingKey := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN" + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{} + params := map[string]interface{}{ + "varNat": float64(1), + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + _, err = tz.InvokeContract(context.Background(), "", signingKey, fftypes.JSONAnyPtrBytes(locationBytes), "wrong", params, options, nil) + assert.Regexp(t, "FF10457", err) +} + +func TestQueryContractOK(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{ + "customOption": "customValue", + } + method := testFFIMethod() + params := map[string]interface{}{ + "varNat": float64(1), + "varInt": float64(2), + "varString": "str", + "varStringOpt": "optional str", + "varBytes": "0xAA", + "varBool": true, + "varAddress": "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + var body map[string]interface{} + json.NewDecoder(req.Body).Decode(&body) + headers := body["headers"].(map[string]interface{}) + assert.Equal(t, "Query", headers["type"]) + assert.Equal(t, "KT12345", body["to"].(string)) + assert.Equal(t, "tz12345", body["from"].(string)) + assert.Equal(t, body["customOption"].(string), "customValue") + return httpmock.NewJsonResponderOrPanic(200, "result")(req) + }) + + parsedMethod, err := 
tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + result, err := tz.QueryContract(context.Background(), "tz12345", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) + assert.NoError(t, err) + + j, err := json.Marshal(result) + assert.NoError(t, err) + assert.Equal(t, `"result"`, string(j)) +} + +func TestQueryContractInvalidOption(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{ + "params": "shouldn't be allowed", + } + method := testFFIMethod() + params := map[string]interface{}{ + "varNat": float64(1), + "varInt": float64(2), + "varString": "str", + "varStringOpt": "optional str", + "varBytes": "0xAA", + "varBool": true, + "varAddress": "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + parsedMethod, err := tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + _, err = tz.QueryContract(context.Background(), "tz12345", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) + assert.Regexp(t, "FF10398", err) +} + +func TestQueryContractErrorPrepare(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{} + params := map[string]interface{}{ + "varNat": float64(1), + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + _, err = tz.QueryContract(context.Background(), "tz12345", fftypes.JSONAnyPtrBytes(locationBytes), "wrong", params, options) + assert.Regexp(t, "FF10457", err) +} + +func TestQueryContractAddressNotSet(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + location := &Location{} + options := map[string]interface{}{} + method := testFFIMethod() + params := map[string]interface{}{ + "varNat": float64(1), + "varInt": float64(2), + "varString": "str", + "varStringOpt": "optional str", + "varBytes": "0xAA", + "varBool": true, + "varAddress": "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + parsedMethod, err := tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + _, err = tz.QueryContract(context.Background(), "tz12345", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) + assert.Regexp(t, "'address' not set", err) +} + +func TestQueryContractTezosconnectError(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{} + method := testFFIMethod() + params := map[string]interface{}{ + "varNat": float64(1), + "varInt": float64(2), + "varString": "str", + "varStringOpt": "optional str", + "varBytes": "0xAA", + "varBool": true, + "varAddress": "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponderOrPanic(400, 
"")(req) + }) + + parsedMethod, err := tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + _, err = tz.QueryContract(context.Background(), "tz12345", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) + assert.Regexp(t, "FF10283", err) +} + +func TestQueryContractUnmarshalResponseError(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + location := &Location{ + Address: "KT12345", + } + options := map[string]interface{}{} + method := testFFIMethod() + params := map[string]interface{}{ + "varNat": float64(1), + "varInt": float64(2), + "varString": "str", + "varStringOpt": "optional str", + "varBytes": "0xAA", + "varBool": true, + "varAddress": "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + + httpmock.RegisterResponder("POST", `http://localhost:12345/`, + func(req *http.Request) (*http.Response, error) { + var body map[string]interface{} + json.NewDecoder(req.Body).Decode(&body) + headers := body["headers"].(map[string]interface{}) + assert.Equal(t, "Query", headers["type"]) + return httpmock.NewStringResponder(200, "[definitely not JSON}")(req) + }) + + parsedMethod, err := tz.ParseInterface(context.Background(), method, nil) + assert.NoError(t, err) + + _, err = tz.QueryContract(context.Background(), "tz12345", fftypes.JSONAnyPtrBytes(locationBytes), parsedMethod, params, options) + assert.Regexp(t, "invalid character", err) +} + +func TestGetFFIParamValidator(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + _, err := tz.GetFFIParamValidator(context.Background()) + assert.NoError(t, err) +} + +func TestGenerateFFI(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + _, err := tz.GenerateFFI(context.Background(), &fftypes.FFIGenerationRequest{ + Name: "Simple", + Version: "v0.0.1", + Description: "desc", + Input: fftypes.JSONAnyPtr(`[]`), + }) + assert.Regexp(t, "FF10347", err) +} + +func TestConvertDeprecatedContractConfigNoChaincode(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + _, _, err := tz.GetAndConvertDeprecatedContractConfig(tz.ctx) + assert.NoError(t, err) +} + +func TestNormalizeContractLocation(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + location := &Location{ + Address: "KT1CosvuPHD6YnY4uYNguJj6m58UuHJWyS1u", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + _, err = tz.NormalizeContractLocation(context.Background(), blockchain.NormalizeCall, fftypes.JSONAnyPtrBytes(locationBytes)) + assert.NoError(t, err) +} + +func TestNormalizeContractLocationInvalid(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + location := &Location{ + Address: "wrong", + } + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + _, err = tz.NormalizeContractLocation(context.Background(), blockchain.NormalizeCall, fftypes.JSONAnyPtrBytes(locationBytes)) + assert.Regexp(t, "FF10142", err) +} + +func TestNormalizeContractLocationBlank(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + location := &Location{} + locationBytes, err := json.Marshal(location) + assert.NoError(t, err) + _, err = tz.NormalizeContractLocation(context.Background(), blockchain.NormalizeCall, fftypes.JSONAnyPtrBytes(locationBytes)) + assert.Regexp(t, "FF10310", err) +} + +func TestGenerateEventSignature(t *testing.T) { + tz, cancel := newTestTezos() + defer 
cancel() + signature := tz.GenerateEventSignature(context.Background(), &fftypes.FFIEventDefinition{Name: "Changed"}) + assert.Equal(t, "Changed", signature) +} + +func TestAddSubBadLocation(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + location := fftypes.JSONAnyPtr(fftypes.JSONObject{ + "bad": "bad", + }.String()) + contract := &blockchain.MultipartyContract{ + Location: location, + FirstEvent: "oldest", + } + + ns := &core.Namespace{Name: "ns1", NetworkName: "ns1"} + _, err := tz.AddFireflySubscription(tz.ctx, ns, contract) + assert.Regexp(t, "FF10310", err) +} + +func TestAddAndRemoveFireflySubscription(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(200, []subscription{})) + httpmock.RegisterResponder("POST", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(200, subscription{ + ID: "sub1", + })) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + originalContext := tz.ctx + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + cmi.AssertCalled(t, "GetCache", cache.NewCacheConfig( + originalContext, + coreconfig.CacheBlockchainLimit, + coreconfig.CacheBlockchainTTL, + "", + )) + assert.NoError(t, err) + location := fftypes.JSONAnyPtr(fftypes.JSONObject{ + "address": "KT123", + }.String()) + contract := &blockchain.MultipartyContract{ + Location: location, + FirstEvent: "newest", + } + + ns := &core.Namespace{Name: "ns1", NetworkName: "ns1"} + subID, err := tz.AddFireflySubscription(tz.ctx, ns, contract) + assert.NoError(t, err) + assert.NotNil(t, tz.subs.GetSubscription("sub1")) + + tz.RemoveFireflySubscription(tz.ctx, subID) + assert.Nil(t, tz.subs.GetSubscription("sub1")) +} + +func TestAddFireflySubscriptionQuerySubsFail(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions", + httpmock.NewStringResponder(500, `pop`)) + httpmock.RegisterResponder("POST", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(200, subscription{})) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", 
mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.NoError(t, err) + + location := fftypes.JSONAnyPtr(fftypes.JSONObject{ + "address": "KT123", + }.String()) + contract := &blockchain.MultipartyContract{ + Location: location, + FirstEvent: "oldest", + } + + ns := &core.Namespace{Name: "ns1", NetworkName: "ns1"} + _, err = tz.AddFireflySubscription(tz.ctx, ns, contract) + assert.Regexp(t, "FF10283", err) +} + +func TestAddFireflySubscriptionCreateError(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(200, []subscription{})) + httpmock.RegisterResponder("POST", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(500, `pop`)) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.NoError(t, err) + + location := fftypes.JSONAnyPtr(fftypes.JSONObject{ + "address": "KT123", + }.String()) + contract := &blockchain.MultipartyContract{ + Location: location, + FirstEvent: "oldest", + } + + ns := &core.Namespace{Name: "ns1", NetworkName: "ns1"} + _, err = tz.AddFireflySubscription(tz.ctx, ns, contract) + assert.Regexp(t, "FF10283", err) +} + +func TestGetContractListenerStatus(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + checkpoint := ListenerCheckpoint{ + Block: 0, + TransactionBatchIndex: -1, + TransactionIndex: -1, + MetaInternalResultIndex: -1, + } + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(200, []subscription{})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions/sub1", + httpmock.NewJsonResponderOrPanic(200, subscription{ + ID: "sub1", Stream: "es12345", Name: "ff-sub-1132312312312", subscriptionCheckpoint: subscriptionCheckpoint{ + Catchup: false, + Checkpoint: checkpoint, + }, + })) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + 
assert.NoError(t, err) + + found, status, err := tz.GetContractListenerStatus(context.Background(), "sub1", true) + assert.NotNil(t, status) + assert.NoError(t, err) + assert.True(t, found) +} + +func TestGetContractListenerStatusGetSubFail(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(200, []subscription{})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions/sub1", + httpmock.NewJsonResponderOrPanic(500, `pop`)) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.NoError(t, err) + + found, status, err := tz.GetContractListenerStatus(context.Background(), "sub1", true) + assert.Nil(t, status) + assert.Regexp(t, "FF10283", err) + assert.False(t, found) +} + +func TestGetContractListenerStatusGetSubNotFound(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + mockedClient := &http.Client{} + httpmock.ActivateNonDefault(mockedClient) + defer httpmock.DeactivateAndReset() + + httpmock.RegisterResponder("GET", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, []eventStream{})) + httpmock.RegisterResponder("POST", "http://localhost:12345/eventstreams", + httpmock.NewJsonResponderOrPanic(200, eventStream{ID: "es12345"})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions", + httpmock.NewJsonResponderOrPanic(200, []subscription{})) + httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions/sub1", + httpmock.NewJsonResponderOrPanic(404, `not found`)) + + resetConf(tz) + utTezosconnectConf.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + utTezosconnectConf.Set(ffresty.HTTPCustomClient, mockedClient) + utTezosconnectConf.Set(TezosconnectConfigTopic, "topic1") + + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tz.ctx, 100, 5*time.Minute), nil) + err := tz.Init(tz.ctx, tz.cancelCtx, utConfig, tz.metrics, cmi) + assert.NoError(t, err) + + found, status, err := tz.GetContractListenerStatus(context.Background(), "sub1", true) + assert.Nil(t, status) + assert.Nil(t, err) + assert.False(t, found) +} + +func TestGetTransactionStatusSuccess(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + op := &core.Operation{ + Namespace: "ns1", + ID: fftypes.MustParseUUID("9ffc50ff-6bfe-4502-adc7-93aea54cc059"), + Status: "Pending", + } + + httpmock.RegisterResponder("GET", `http://localhost:12345/transactions/ns1:9ffc50ff-6bfe-4502-adc7-93aea54cc059`, + func(req *http.Request) (*http.Response, error) { + transactionStatus := make(map[string]interface{}) + 
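// Editor's note (annotation, not part of the patch): the transaction-status tests from here
// down pin the polling contract assumed between FireFly core and the connector:
//   GET /transactions/{namespace}:{operationID}
// The mocked responses cover a JSON object whose "status" is "Succeeded" or "Failed", an
// empty object, a 404 (treated as "status not known yet": nil status, nil error), and a 500
// (surfaced as an error). No other status values are asserted by these tests.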
transactionStatus["status"] = "Succeeded" + return httpmock.NewJsonResponderOrPanic(200, transactionStatus)(req) + }) + + status, err := tz.GetTransactionStatus(context.Background(), op) + assert.NotNil(t, status) + assert.NoError(t, err) +} + +func TestGetTransactionStatusFailed(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + op := &core.Operation{ + Namespace: "ns1", + ID: fftypes.MustParseUUID("9ffc50ff-6bfe-4502-adc7-93aea54cc059"), + Status: "Pending", + } + + httpmock.RegisterResponder("GET", `http://localhost:12345/transactions/ns1:9ffc50ff-6bfe-4502-adc7-93aea54cc059`, + func(req *http.Request) (*http.Response, error) { + transactionStatus := make(map[string]interface{}) + transactionStatus["status"] = "Failed" + return httpmock.NewJsonResponderOrPanic(200, transactionStatus)(req) + }) + + status, err := tz.GetTransactionStatus(context.Background(), op) + assert.NotNil(t, status) + assert.NoError(t, err) +} + +func TestGetTransactionStatusEmptyResult(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + op := &core.Operation{ + Namespace: "ns1", + ID: fftypes.MustParseUUID("9ffc50ff-6bfe-4502-adc7-93aea54cc059"), + Status: "Pending", + } + + httpmock.RegisterResponder("GET", `http://localhost:12345/transactions/ns1:9ffc50ff-6bfe-4502-adc7-93aea54cc059`, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponderOrPanic(200, make(map[string]interface{}))(req) + }) + + status, err := tz.GetTransactionStatus(context.Background(), op) + assert.NotNil(t, status) + assert.NoError(t, err) +} + +func TestGetTransactionStatusNoResult(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + op := &core.Operation{ + Namespace: "ns1", + ID: fftypes.MustParseUUID("9ffc50ff-6bfe-4502-adc7-93aea54cc059"), + } + + httpmock.RegisterResponder("GET", `http://localhost:12345/transactions/ns1:9ffc50ff-6bfe-4502-adc7-93aea54cc059`, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponderOrPanic(404, make(map[string]interface{}))(req) + }) + + status, err := tz.GetTransactionStatus(context.Background(), op) + assert.Nil(t, status) + assert.Nil(t, err) +} + +func TestGetTransactionStatusBadResult(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + httpmock.ActivateNonDefault(tz.client.GetClient()) + defer httpmock.DeactivateAndReset() + + op := &core.Operation{ + Namespace: "ns1", + ID: fftypes.MustParseUUID("9ffc50ff-6bfe-4502-adc7-93aea54cc059"), + } + + httpmock.RegisterResponder("GET", `http://localhost:12345/transactions/ns1:9ffc50ff-6bfe-4502-adc7-93aea54cc059`, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponderOrPanic(500, make(map[string]interface{}))(req) + }) + + status, err := tz.GetTransactionStatus(context.Background(), op) + assert.Nil(t, status) + assert.Error(t, err) +} + +func TestValidateInvokeRequest(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + err := tz.ValidateInvokeRequest(context.Background(), &ffiMethodAndErrors{ + method: &fftypes.FFIMethod{}, + }, nil, false) + assert.NoError(t, err) +} + +func TestGenerateErrorSignature(t *testing.T) { + tz, cancel := newTestTezos() + defer cancel() + + res := 
tz.GenerateErrorSignature(context.Background(), nil)
+	assert.Equal(t, res, "")
+}
+
+func TestSubmitNetworkAction(t *testing.T) {
+	tz, cancel := newTestTezos()
+	defer cancel()
+
+	location := fftypes.JSONAnyPtr(fftypes.JSONObject{
+		"address": "KT123",
+	}.String())
+	signer := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN"
+
+	err := tz.SubmitNetworkAction(context.Background(), "", signer, core.NetworkActionTerminate, location)
+	assert.NoError(t, err)
+}
+
+func TestSubmitBatchPin(t *testing.T) {
+	tz, cancel := newTestTezos()
+	defer cancel()
+
+	location := fftypes.JSONAnyPtr(fftypes.JSONObject{
+		"address": "KT123",
+	}.String())
+	signer := "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN"
+
+	err := tz.SubmitBatchPin(context.Background(), "", "", signer, nil, location)
+	assert.NoError(t, err)
+}
diff --git a/internal/broadcast/manager.go b/internal/broadcast/manager.go
index dac7775fb5..52b93ece52 100644
--- a/internal/broadcast/manager.go
+++ b/internal/broadcast/manager.go
@@ -55,7 +55,7 @@ type Manager interface {
 	// From operations.OperationHandler
 	PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error)
-	RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error)
+	RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error)
 }
 
 type broadcastManager struct {
@@ -137,7 +137,7 @@ func (bm *broadcastManager) Name() string {
 
 func (bm *broadcastManager) dispatchBatch(ctx context.Context, payload *batch.DispatchPayload) error {
 	// Ensure all the blobs are published
-	if err := bm.uploadBlobs(ctx, payload.Batch.TX.ID, payload.Data); err != nil {
+	if err := bm.uploadBlobs(ctx, payload.Batch.TX.ID, payload.Data, false /* batch processing does not currently use idempotency keys */); err != nil {
 		return err
 	}
 
@@ -156,20 +156,20 @@ func (bm *broadcastManager) dispatchBatch(ctx context.Context, payload *batch.Di
 	// We are in an (indefinite) retry cycle from the batch processor to dispatch this batch, that is only
 	// terminated with shutdown. So we leave the operation pending on failure, as it is still being retried.
 	// The user will still have the failure details recorded.
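// Editor's sketch (not part of the patch): the hunk just below replaces the
// operations.RemainPendingOnFailure policy constant with a plain idempotentSubmit flag on
// operations.Manager.RunOperation, and the same flag is threaded through uploadBlobs and
// uploadDataBlob. A minimal illustration of the new call shape; "om", "preparedOp" and the
// function name are placeholders, and the real Manager interface lives in
// internal/operations. Assumes context plus the firefly internal/operations and pkg/core
// packages are imported.
func runPrepared(ctx context.Context, om operations.Manager, preparedOp *core.PreparedOperation, idempotencyKey core.IdempotencyKey) error {
	// Pass true only when the caller supplied an idempotency key, so the operations layer
	// knows a resubmission of the same key may legitimately occur later
	_, err := om.RunOperation(ctx, preparedOp, idempotencyKey != "")
	return err
}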
- outputs, err := bm.operations.RunOperation(ctx, opUploadBatch(op, batch), operations.RemainPendingOnFailure) + outputs, err := bm.operations.RunOperation(ctx, opUploadBatch(op, batch), false /* batch processing does not currently use idempotency keys */) if err != nil { return err } payloadRef := outputs.GetString("payloadRef") log.L(ctx).Infof("Pinning broadcast batch %s with author=%s key=%s payloadRef=%s", batch.ID, batch.Author, batch.Key, payloadRef) - return bm.multiparty.SubmitBatchPin(ctx, &payload.Batch, payload.Pins, payloadRef) + return bm.multiparty.SubmitBatchPin(ctx, &payload.Batch, payload.Pins, payloadRef, false /* batch processing does not currently use idempotency keys */) } -func (bm *broadcastManager) uploadBlobs(ctx context.Context, tx *fftypes.UUID, data core.DataArray) error { +func (bm *broadcastManager) uploadBlobs(ctx context.Context, tx *fftypes.UUID, data core.DataArray, idempotentSubmit bool) error { for _, d := range data { // We only need to send a blob if there is one, and it's not been uploaded to the shared storage if d.Blob != nil && d.Blob.Hash != nil && d.Blob.Public == "" { - if err := bm.uploadDataBlob(ctx, tx, d); err != nil { + if err := bm.uploadDataBlob(ctx, tx, d, idempotentSubmit); err != nil { return err } } @@ -195,7 +195,7 @@ func (bm *broadcastManager) resolveData(ctx context.Context, id string) (*core.D return d, nil } -func (bm *broadcastManager) uploadDataBlob(ctx context.Context, tx *fftypes.UUID, d *core.Data) error { +func (bm *broadcastManager) uploadDataBlob(ctx context.Context, tx *fftypes.UUID, d *core.Data, idempotentSubmit bool) error { if d.Blob == nil || d.Blob.Hash == nil { return i18n.NewError(ctx, coremsgs.MsgDataDoesNotHaveBlob) } @@ -218,7 +218,7 @@ func (bm *broadcastManager) uploadDataBlob(ctx context.Context, tx *fftypes.UUID return i18n.NewError(ctx, coremsgs.MsgBlobNotFound, d.Blob.Hash) } - _, err = bm.operations.RunOperation(ctx, opUploadBlob(op, d, blobs[0])) + _, err = bm.operations.RunOperation(ctx, opUploadBlob(op, d, blobs[0]), idempotentSubmit) return err } @@ -233,18 +233,27 @@ func (bm *broadcastManager) PublishDataValue(ctx context.Context, id string, ide if err != nil { // Check if we've clashed on idempotency key. 
There might be operations still in "Initialized" state that need // submitting to their handlers + resubmitWholeTX := false if idemErr, ok := err.(*sqlcommon.IdempotencyError); ok { - operation, resubmitErr := bm.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) + total, resubmitted, resubmitErr := bm.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) - if resubmitErr != nil { + switch { + case resubmitErr != nil: // Error doing resubmit, return the new error err = resubmitErr - } else if operation != nil { + case total == 0: + // We didn't do anything last time - just start again + txid = idemErr.ExistingTXID + resubmitWholeTX = true + err = nil + case len(resubmitted) > 0: // We successfully resubmitted an initialized operation, return 2xx not 409 err = nil } } - return d, err + if !resubmitWholeTX { + return d, err + } } op := core.NewOperation( @@ -257,7 +266,7 @@ func (bm *broadcastManager) PublishDataValue(ctx context.Context, id string, ide return nil, err } - if _, err := bm.operations.RunOperation(ctx, opUploadValue(op, d)); err != nil { + if _, err := bm.operations.RunOperation(ctx, opUploadValue(op, d), idempotencyKey != ""); err != nil { return nil, err } @@ -275,21 +284,30 @@ func (bm *broadcastManager) PublishDataBlob(ctx context.Context, id string, idem if err != nil { // Check if we've clashed on idempotency key. There might be operations still in "Initialized" state that need // submitting to their handlers + resubmitWholeTX := false if idemErr, ok := err.(*sqlcommon.IdempotencyError); ok { - operation, resubmitErr := bm.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) + total, resubmitted, resubmitErr := bm.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) - if resubmitErr != nil { + switch { + case resubmitErr != nil: // Error doing resubmit, return the new error err = resubmitErr - } else if operation != nil { + case total == 0: + // We didn't do anything last time - just start again + txid = idemErr.ExistingTXID + resubmitWholeTX = true + err = nil + case len(resubmitted) > 0: // We successfully resubmitted an initialized operation, return 2xx not 409 err = nil } } - return d, err + if !resubmitWholeTX { + return d, err + } } - if err = bm.uploadDataBlob(ctx, txid, d); err != nil { + if err = bm.uploadDataBlob(ctx, txid, d, idempotencyKey != ""); err != nil { return nil, err } diff --git a/internal/broadcast/manager_test.go b/internal/broadcast/manager_test.go index 8aa5866cae..a57347d11d 100644 --- a/internal/broadcast/manager_test.go +++ b/internal/broadcast/manager_test.go @@ -28,7 +28,6 @@ import ( "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/database/sqlcommon" - "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/mocks/batchmocks" "github.com/hyperledger/firefly/mocks/blockchainmocks" "github.com/hyperledger/firefly/mocks/databasemocks" @@ -245,7 +244,7 @@ func TestDispatchBatchUploadFail(t *testing.T) { mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(uploadBatchData) return op.Type == core.OpTypeSharedStorageUploadBatch && data.Batch.ID.Equals(state.Batch.ID) - }), operations.RemainPendingOnFailure).Return(nil, fmt.Errorf("pop")) + }), false).Return(nil, fmt.Errorf("pop")) err := bm.dispatchBatch(context.Background(), state) assert.EqualError(t, err, "pop") @@ -270,11 +269,11 @@ func TestDispatchBatchSubmitBatchPinSucceed(t *testing.T) { mmp := 
bm.multiparty.(*multipartymocks.Manager) mom := bm.operations.(*operationmocks.Manager) mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(nil) - mmp.On("SubmitBatchPin", mock.Anything, mock.Anything, mock.Anything, "payload1").Return(nil) + mmp.On("SubmitBatchPin", mock.Anything, mock.Anything, mock.Anything, "payload1", false).Return(nil) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(uploadBatchData) return op.Type == core.OpTypeSharedStorageUploadBatch && data.Batch.ID.Equals(state.Batch.ID) - }), operations.RemainPendingOnFailure).Return(getUploadBatchOutputs("payload1"), nil) + }), false).Return(getUploadBatchOutputs("payload1"), nil) err := bm.dispatchBatch(context.Background(), state) assert.NoError(t, err) @@ -302,11 +301,11 @@ func TestDispatchBatchSubmitBroadcastFail(t *testing.T) { mmp := bm.multiparty.(*multipartymocks.Manager) mom := bm.operations.(*operationmocks.Manager) mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(nil) - mmp.On("SubmitBatchPin", mock.Anything, mock.Anything, mock.Anything, "payload1").Return(fmt.Errorf("pop")) + mmp.On("SubmitBatchPin", mock.Anything, mock.Anything, mock.Anything, "payload1", false).Return(fmt.Errorf("pop")) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(uploadBatchData) return op.Type == core.OpTypeSharedStorageUploadBatch && data.Batch.ID.Equals(state.Batch.ID) - }), operations.RemainPendingOnFailure).Return(getUploadBatchOutputs("payload1"), nil) + }), false).Return(getUploadBatchOutputs("payload1"), nil) err := bm.dispatchBatch(context.Background(), state) assert.EqualError(t, err, "pop") @@ -344,7 +343,7 @@ func TestUploadBlobPublishFail(t *testing.T) { mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(uploadBlobData) return op.Type == core.OpTypeSharedStorageUploadBlob && data.Blob == blob - })).Return(nil, fmt.Errorf("pop")) + }), true).Return(nil, fmt.Errorf("pop")) _, err := bm.PublishDataBlob(ctx, d.ID.String(), "idem1") assert.EqualError(t, err, "pop") @@ -379,7 +378,7 @@ func TestUploadBlobPublishIdempotentResubmitOperation(t *testing.T) { mtx.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeDataPublish, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(op, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1, []*core.Operation{op}, nil) mdi.On("GetDataByID", ctx, "ns1", d.ID, true).Return(d, nil) // If ResubmitOperations returns an operation it's because it found one to resubmit, we return 2xx not 409 and hence don't expect any errors here @@ -390,6 +389,43 @@ func TestUploadBlobPublishIdempotentResubmitOperation(t *testing.T) { mdi.AssertExpectations(t) } +func TestUploadBlobPublishIdempotentResubmitAll(t *testing.T) { + bm, cancel := newTestBroadcast(t) + var id = fftypes.NewUUID() + defer cancel() + mdi := bm.database.(*databasemocks.Plugin) + mom := bm.operations.(*operationmocks.Manager) + mtx := bm.txHelper.(*txcommonmocks.Helper) + + blob := &core.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "blob/1", + } + d := &core.Data{ + ID: fftypes.NewUUID(), + Blob: &core.BlobRef{ + Hash: blob.Hash, + }, + } + + ctx := context.Background() + 
mtx.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeDataPublish, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ + ExistingTXID: id, + OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) + mom.On("ResubmitOperations", context.Background(), id).Return(0, nil, nil) + mdi.On("GetDataByID", ctx, "ns1", d.ID, true).Return(d, nil) + mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(nil) + mdi.On("GetBlobs", ctx, bm.namespace.Name, mock.Anything).Return([]*core.Blob{blob}, nil, nil) + mom.On("RunOperation", mock.Anything, mock.Anything, true).Return(nil, nil) + + // If ResubmitOperations returns an operation it's because it found one to resubmit, we return 2xx not 409 and hence don't expect any errors here + d, err := bm.PublishDataBlob(ctx, d.ID.String(), "idem1") + assert.NotNil(t, d) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + func TestUploadBlobPublishIdempotentNoOperationToResubmit(t *testing.T) { bm, cancel := newTestBroadcast(t) var id = fftypes.NewUUID() @@ -413,7 +449,7 @@ func TestUploadBlobPublishIdempotentNoOperationToResubmit(t *testing.T) { mtx.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeDataPublish, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1 /* total */, nil /* to resubmit */, nil) mdi.On("GetDataByID", ctx, "ns1", d.ID, true).Return(d, nil) // If ResubmitOperations returns nil it's because there was no operation in initialized state, so we expect the regular 409 error back @@ -447,7 +483,7 @@ func TestUploadBlobPublishIdempotentErrorOnOperationResubmit(t *testing.T) { mtx.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeDataPublish, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, fmt.Errorf("pop")) + mom.On("ResubmitOperations", context.Background(), id).Return(-1, nil, fmt.Errorf("pop")) mdi.On("GetDataByID", ctx, "ns1", d.ID, true).Return(d, nil) // If ResubmitOperations returned an error trying to resubmit an operation we expect that error back, not the 409 conflict error @@ -482,7 +518,7 @@ func TestUploadBlobsGetBlobFail(t *testing.T) { Hash: blob.Hash, }, }, - }) + }, false) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -514,7 +550,7 @@ func TestUploadBlobsGetBlobNotFound(t *testing.T) { Hash: blob.Hash, }, }, - }) + }, false) assert.Regexp(t, "FF10239", err) mdi.AssertExpectations(t) @@ -543,7 +579,7 @@ func TestUploadBlobsGetBlobInsertOpFail(t *testing.T) { Hash: blob.Hash, }, }, - }) + }, true) assert.EqualError(t, err, "pop") mom.AssertExpectations(t) @@ -627,7 +663,7 @@ func TestUploadValueFail(t *testing.T) { mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(uploadValue) return op.Type == core.OpTypeSharedStorageUploadValue && data.Data.ID.Equals(d.ID) - })).Return(nil, fmt.Errorf("pop")) + }), false).Return(nil, fmt.Errorf("pop")) mtx := bm.txHelper.(*txcommonmocks.Helper) 
mtx.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeDataPublish, core.IdempotencyKey("")).Return(fftypes.NewUUID(), nil) @@ -656,7 +692,7 @@ func TestUploadValueOK(t *testing.T) { mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(uploadValue) return op.Type == core.OpTypeSharedStorageUploadValue && data.Data.ID.Equals(d.ID) - })).Return(nil, nil) + }), false).Return(nil, nil) mtx := bm.txHelper.(*txcommonmocks.Helper) mtx.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeDataPublish, core.IdempotencyKey("")).Return(fftypes.NewUUID(), nil) @@ -690,7 +726,39 @@ func TestUploadValueIdempotentResubmitOperation(t *testing.T) { mtx.On("SubmitNewTransaction", context.Background(), core.TransactionTypeDataPublish, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(op, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1, []*core.Operation{op}, nil) + + // If ResubmitOperations returns an operation it's because it found one to resubmit, we return 2xx not 409 and hence don't expect any errors here + d1, err := bm.PublishDataValue(context.Background(), d.ID.String(), "idem1") + assert.NoError(t, err) + assert.Equal(t, d.ID, d1.ID) + + mom.AssertExpectations(t) + mdi.AssertExpectations(t) +} + +func TestUploadValueIdempotentResubmitAll(t *testing.T) { + bm, cancel := newTestBroadcast(t) + var id = fftypes.NewUUID() + defer cancel() + + d := &core.Data{ + ID: fftypes.NewUUID(), + Value: fftypes.JSONAnyPtr(`{"some": "value"}`), + } + + mdi := bm.database.(*databasemocks.Plugin) + mdi.On("GetDataByID", mock.Anything, "ns1", d.ID, true).Return(d, nil) + + mom := bm.operations.(*operationmocks.Manager) + + mtx := bm.txHelper.(*txcommonmocks.Helper) + mtx.On("SubmitNewTransaction", context.Background(), core.TransactionTypeDataPublish, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ + ExistingTXID: id, + OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) + mom.On("ResubmitOperations", context.Background(), id).Return(0, nil, nil) + mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(nil) + mom.On("RunOperation", mock.Anything, mock.Anything, true).Return(nil, nil) // If ResubmitOperations returns an operation it's because it found one to resubmit, we return 2xx not 409 and hence don't expect any errors here d1, err := bm.PublishDataValue(context.Background(), d.ID.String(), "idem1") @@ -720,7 +788,7 @@ func TestUploadValueIdempotentNoOperationToResubmit(t *testing.T) { mtx.On("SubmitNewTransaction", context.Background(), core.TransactionTypeDataPublish, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1 /* total */, nil /* to resubmit */, nil) // If ResubmitOperations returns nil it's because there was no operation in initialized state, so we expect the regular 409 error back _, err := bm.PublishDataValue(context.Background(), d.ID.String(), "idem1") @@ -750,7 +818,7 
@@ func TestUploadValueIdempotentErrorOnOperationResubmit(t *testing.T) { mtx.On("SubmitNewTransaction", context.Background(), core.TransactionTypeDataPublish, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, fmt.Errorf("pop")) + mom.On("ResubmitOperations", context.Background(), id).Return(-1, nil, fmt.Errorf("pop")) // If ResubmitOperations returns nil it's because there was no operation in initialized state, so we expect the regular 409 error back _, err := bm.PublishDataValue(context.Background(), d.ID.String(), "idem1") @@ -808,7 +876,7 @@ func TestUploadBlobOK(t *testing.T) { mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(uploadBlobData) return op.Type == core.OpTypeSharedStorageUploadBlob && data.Blob == blob - })).Return(nil, nil) + }), false).Return(nil, nil) mtx := bm.txHelper.(*txcommonmocks.Helper) mtx.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeDataPublish, core.IdempotencyKey("")).Return(fftypes.NewUUID(), nil) diff --git a/internal/broadcast/operations.go b/internal/broadcast/operations.go index 68f74f412c..80d98b39cb 100644 --- a/internal/broadcast/operations.go +++ b/internal/broadcast/operations.go @@ -140,7 +140,7 @@ func (bm *broadcastManager) PrepareOperation(ctx context.Context, op *core.Opera } } -func (bm *broadcastManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { +func (bm *broadcastManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { switch data := op.Data.(type) { case uploadBatchData: return bm.uploadBatch(ctx, data) @@ -149,71 +149,71 @@ func (bm *broadcastManager) RunOperation(ctx context.Context, op *core.PreparedO case uploadValue: return bm.uploadValue(ctx, data) default: - return nil, false, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) + return nil, core.OpPhaseInitializing, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) } } // uploadBatch uploads the serialized batch to public storage -func (bm *broadcastManager) uploadBatch(ctx context.Context, data uploadBatchData) (outputs fftypes.JSONObject, complete bool, err error) { +func (bm *broadcastManager) uploadBatch(ctx context.Context, data uploadBatchData) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { // Serialize the full payload, which has already been sealed for us by the BatchManager data.Batch.Namespace = bm.namespace.NetworkName payload, err := json.Marshal(data.Batch) if err != nil { - return nil, false, i18n.WrapError(ctx, err, coremsgs.MsgSerializationFailed) + return nil, core.OpPhaseInitializing, i18n.WrapError(ctx, err, coremsgs.MsgSerializationFailed) } // Write it to IPFS to get a payload reference payloadRef, err := bm.sharedstorage.UploadData(ctx, bytes.NewReader(payload)) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } log.L(ctx).Infof("Published batch '%s' to shared storage: '%s'", data.Batch.ID, payloadRef) - return getUploadBatchOutputs(payloadRef), true, nil + return getUploadBatchOutputs(payloadRef), core.OpPhaseComplete, nil } // uploadBlob streams a blob from the local data exchange, to public storage -func (bm 
*broadcastManager) uploadBlob(ctx context.Context, data uploadBlobData) (outputs fftypes.JSONObject, complete bool, err error) { +func (bm *broadcastManager) uploadBlob(ctx context.Context, data uploadBlobData) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { // Stream from the local data exchange ... reader, err := bm.exchange.DownloadBlob(ctx, data.Blob.PayloadRef) if err != nil { - return nil, false, i18n.WrapError(ctx, err, coremsgs.MsgDownloadBlobFailed, data.Blob.PayloadRef) + return nil, core.OpPhaseInitializing, i18n.WrapError(ctx, err, coremsgs.MsgDownloadBlobFailed, data.Blob.PayloadRef) } defer reader.Close() // ... to the shared storage data.Data.Blob.Public, err = bm.sharedstorage.UploadData(ctx, reader) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } // Update the data in the DB err = bm.database.UpdateData(ctx, bm.namespace.Name, data.Data.ID, database.DataQueryFactory.NewUpdate(ctx).Set("blob.public", data.Data.Blob.Public)) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } log.L(ctx).Infof("Published blob with hash '%s' for data '%s' to shared storage: '%s'", data.Data.Blob.Hash, data.Data.ID, data.Data.Blob.Public) - return getUploadBlobOutputs(data.Data.Blob.Public), true, nil + return getUploadBlobOutputs(data.Data.Blob.Public), core.OpPhaseComplete, nil } // uploadValue streams the value JSON from a data record to public storage -func (bm *broadcastManager) uploadValue(ctx context.Context, data uploadValue) (outputs fftypes.JSONObject, complete bool, err error) { +func (bm *broadcastManager) uploadValue(ctx context.Context, data uploadValue) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { // Upload to shared storage data.Data.Public, err = bm.sharedstorage.UploadData(ctx, bytes.NewReader(data.Data.Value.Bytes())) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } // Update the public reference for the data in the DB err = bm.database.UpdateData(ctx, bm.namespace.Name, data.Data.ID, database.DataQueryFactory.NewUpdate(ctx).Set("public", data.Data.Public)) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } log.L(ctx).Infof("Published value for data '%s' to shared storage: '%s'", data.Data.ID, data.Data.Public) - return getUploadBlobOutputs(data.Data.Public), true, nil + return getUploadBlobOutputs(data.Data.Public), core.OpPhaseComplete, nil } func (bm *broadcastManager) OnOperationUpdate(ctx context.Context, op *core.Operation, update *core.OperationUpdate) error { diff --git a/internal/broadcast/operations_test.go b/internal/broadcast/operations_test.go index 43210812c9..ceec07c76f 100644 --- a/internal/broadcast/operations_test.go +++ b/internal/broadcast/operations_test.go @@ -6,7 +6,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -61,9 +61,9 @@ func TestPrepareAndRunBatchBroadcast(t *testing.T) { assert.NoError(t, err) assert.Equal(t, bp.ID, po.Data.(uploadBatchData).Batch.ID) - _, complete, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch)) + _, phase, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch)) - assert.True(t, complete) + assert.Equal(t, core.OpPhaseComplete, phase) assert.NoError(t, err) mps.AssertExpectations(t) @@ -159,9 +159,9 @@ func TestRunOperationNotSupported(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - _, complete, err := bm.RunOperation(context.Background(), &core.PreparedOperation{}) + _, phase, err := bm.RunOperation(context.Background(), &core.PreparedOperation{}) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10378", err) } @@ -178,9 +178,9 @@ func TestRunOperationBatchBroadcastInvalidData(t *testing.T) { }, } - _, complete, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch)) + _, phase, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10137", err) } @@ -198,9 +198,9 @@ func TestRunOperationBatchBroadcastPublishFail(t *testing.T) { mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) mps.On("UploadData", context.Background(), mock.Anything).Return("", fmt.Errorf("pop")) - _, complete, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch)) + _, phase, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.EqualError(t, err, "pop") mps.AssertExpectations(t) @@ -221,10 +221,10 @@ func TestRunOperationBatchBroadcast(t *testing.T) { mdi := bm.database.(*databasemocks.Plugin) mps.On("UploadData", context.Background(), mock.Anything).Return("123", nil) - outputs, complete, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch)) + outputs, phase, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch)) assert.Equal(t, "123", outputs["payloadRef"]) - assert.True(t, complete) + assert.Equal(t, core.OpPhaseComplete, phase) assert.NoError(t, err) mps.AssertExpectations(t) @@ -273,10 +273,10 @@ func TestPrepareAndRunUploadBlob(t *testing.T) { assert.Equal(t, data, po.Data.(uploadBlobData).Data) assert.Equal(t, blob, po.Data.(uploadBlobData).Blob) - outputs, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) + outputs, phase, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) assert.Equal(t, "123", outputs["payloadRef"]) - assert.True(t, complete) + assert.Equal(t, core.OpPhaseComplete, phase) assert.NoError(t, err) mps.AssertExpectations(t) @@ -316,10 +316,10 @@ func TestPrepareAndRunValue(t *testing.T) { assert.NoError(t, err) assert.Equal(t, data, po.Data.(uploadValue).Data) - outputs, complete, err := bm.RunOperation(context.Background(), opUploadValue(op, data)) + outputs, phase, err := bm.RunOperation(context.Background(), opUploadValue(op, data)) assert.Equal(t, "123", outputs["payloadRef"]) - assert.True(t, complete) + assert.Equal(t, core.OpPhaseComplete, phase) 
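// Editor's sketch (not part of the patch): RunOperation handlers now report a core.OpPhase
// instead of a bare "complete" bool. Only the two values visible in this patch are used
// here (OpPhaseComplete on success, OpPhaseInitializing when the work failed before being
// handed off); any further phases are outside the scope of this sketch. Assumes the
// firefly-common fftypes package and firefly pkg/core are imported; the function name is
// illustrative.
func uploadOutcomeToPhase(outputs fftypes.JSONObject, err error) (fftypes.JSONObject, core.OpPhase, error) {
	if err != nil {
		// Nothing was handed off to shared storage, so the operation is still "initializing"
		// and remains safe to resubmit later
		return nil, core.OpPhaseInitializing, err
	}
	// The upload finished and its outputs (e.g. payloadRef) were captured
	return outputs, core.OpPhaseComplete, nil
}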
assert.NoError(t, err) mps.AssertExpectations(t) @@ -527,9 +527,9 @@ func TestRunOperationUploadBlobUpdateFail(t *testing.T) { mps.On("UploadData", context.Background(), mock.Anything).Return("123", nil) mdi.On("UpdateData", context.Background(), "ns1", data.ID, mock.Anything).Return(fmt.Errorf("pop")) - _, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) + _, phase, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "pop", err) mps.AssertExpectations(t) @@ -553,9 +553,9 @@ func TestRunOperationUploadValueUpdateFail(t *testing.T) { mps.On("UploadData", context.Background(), mock.Anything).Return("123", nil) mdi.On("UpdateData", context.Background(), "ns1", data.ID, mock.Anything).Return(fmt.Errorf("pop")) - _, complete, err := bm.RunOperation(context.Background(), opUploadValue(op, data)) + _, phase, err := bm.RunOperation(context.Background(), opUploadValue(op, data)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "pop", err) mps.AssertExpectations(t) @@ -584,9 +584,9 @@ func TestRunOperationUploadBlobUploadFail(t *testing.T) { mdx.On("DownloadBlob", context.Background(), mock.Anything).Return(reader, nil) mps.On("UploadData", context.Background(), mock.Anything).Return("", fmt.Errorf("pop")) - _, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) + _, phase, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "pop", err) mps.AssertExpectations(t) @@ -606,9 +606,9 @@ func TestRunOperationUploadValueUploadFail(t *testing.T) { mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) mps.On("UploadData", context.Background(), mock.Anything).Return("", fmt.Errorf("pop")) - _, complete, err := bm.RunOperation(context.Background(), opUploadValue(op, data)) + _, phase, err := bm.RunOperation(context.Background(), opUploadValue(op, data)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "pop", err) mps.AssertExpectations(t) @@ -635,9 +635,9 @@ func TestRunOperationUploadBlobDownloadFail(t *testing.T) { reader := ioutil.NopCloser(strings.NewReader("some data")) mdx.On("DownloadBlob", context.Background(), mock.Anything).Return(reader, fmt.Errorf("pop")) - _, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) + _, phase, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "pop", err) mps.AssertExpectations(t) diff --git a/internal/cache/cache.go b/internal/cache/cache.go index a32b8062d0..ec676fddc9 100644 --- a/internal/cache/cache.go +++ b/internal/cache/cache.go @@ -19,12 +19,11 @@ package cache import ( "context" "strings" - "sync" "time" + "github.com/hyperledger/firefly-common/pkg/cache" "github.com/hyperledger/firefly-common/pkg/config" "github.com/hyperledger/firefly-common/pkg/i18n" - "github.com/karlseguin/ccache" "github.com/hyperledger/firefly/internal/coreconfig" "github.com/hyperledger/firefly/internal/coremsgs" @@ -55,7 +54,7 @@ func (cc *CConfig) UniqueName() (string, error) { if err != nil { return "", err } - return cc.namespace + "::" + category, nil + return category, nil } func (cc *CConfig) Category() (string, error) { @@ -101,90 
+100,20 @@ func (cc *CConfig) TTL() time.Duration { type Manager interface { GetCache(cc *CConfig) (CInterface, error) ResetCachesForNamespace(ns string) - ListKeys() []string + ListCacheNames(namespace string) []string } -type CInterface interface { - Get(key string) interface{} - Set(key string, val interface{}) - - GetString(key string) string - SetString(key string, val string) - - GetInt(key string) int - SetInt(key string, val int) -} - -type CCache struct { - enabled bool - ctx context.Context - namespace string - name string - cache *ccache.Cache - cacheTTL time.Duration -} - -func (c *CCache) Set(key string, val interface{}) { - if !c.enabled { - return - } - c.cache.Set(c.name+":"+key, val, c.cacheTTL) -} -func (c *CCache) Get(key string) interface{} { - if !c.enabled { - return nil - } - if cached := c.cache.Get(c.name + ":" + key); cached != nil { - cached.Extend(c.cacheTTL) - return cached.Value() - } - return nil -} - -func (c *CCache) SetString(key string, val string) { - c.Set(key, val) -} - -func (c *CCache) GetString(key string) string { - val := c.Get(key) - if val != nil { - return c.Get(key).(string) - } - return "" -} - -func (c *CCache) SetInt(key string, val int) { - c.Set(key, val) -} - -func (c *CCache) GetInt(key string) int { - val := c.Get(key) - if val != nil { - return c.Get(key).(int) - } - return 0 -} +type CInterface cache.CInterface type cacheManager struct { - ctx context.Context - enabled bool - m sync.Mutex - // maintain a list of named configured CCache, the name are unique configuration category id - // e.g. cache.batch - configuredCaches map[string]*CCache + ffcache cache.Manager } func (cm *cacheManager) ResetCachesForNamespace(ns string) { - cm.m.Lock() - defer cm.m.Unlock() - for k, c := range cm.configuredCaches { - if c.namespace == ns { - // Clear the cache to free the memory immediately - c.cache.Clear() - // Remove it from the map, so the next call will generate a new one - delete(cm.configuredCaches, k) - } - } + cm.ffcache.ResetCaches(ns) +} +func (cm *cacheManager) ListCacheNames(namespace string) []string { + return cm.ffcache.ListCacheNames(namespace) } func (cm *cacheManager) GetCache(cc *CConfig) (CInterface, error) { @@ -196,47 +125,24 @@ func (cm *cacheManager) GetCache(cc *CConfig) (CInterface, error) { if err != nil { return nil, err } - cm.m.Lock() - cache, exists := cm.configuredCaches[cacheName] - if !exists { - cache = &CCache{ - ctx: cc.ctx, - namespace: cc.namespace, - name: cacheName, - cache: ccache.New(ccache.Configure().MaxSize(maxSize)), - cacheTTL: cc.TTL(), - enabled: cm.enabled, - } - cm.configuredCaches[cacheName] = cache - } - cm.m.Unlock() - return cache, nil -} -func (cm *cacheManager) ListKeys() []string { - keys := make([]string, 0, len(cm.configuredCaches)) - for k := range cm.configuredCaches { - keys = append(keys, k) - } - return keys + return cm.ffcache.GetCache( + cc.ctx, + cc.namespace, + cacheName, + maxSize, + cc.TTL(), + cm.ffcache.IsEnabled(), + ) } - func NewCacheManager(ctx context.Context) Manager { cm := &cacheManager{ - ctx: ctx, - enabled: config.GetBool(coreconfig.CacheEnabled), - configuredCaches: map[string]*CCache{}, + ffcache: cache.NewCacheManager(ctx, config.GetBool(coreconfig.CacheEnabled)), } return cm } // should only be used for testing purpose func NewUmanagedCache(ctx context.Context, sizeLimit int64, ttl time.Duration) CInterface { - return &CCache{ - ctx: ctx, - name: "cache.unmanaged", - cache: ccache.New(ccache.Configure().MaxSize(sizeLimit)), - cacheTTL: ttl, - enabled: true, 
- } + return cache.NewUmanagedCache(ctx, sizeLimit, ttl) } diff --git a/internal/cache/cache_test.go b/internal/cache/cache_test.go index b3a8000a18..a5b129d907 100644 --- a/internal/cache/cache_test.go +++ b/internal/cache/cache_test.go @@ -48,11 +48,12 @@ func TestGetCacheReturnsSameCacheForSameConfig(t *testing.T) { cache1, _ := cacheManager.GetCache(NewCacheConfig(ctx, "cache.batch.limit", "cache.batch.ttl", "testnamespace")) assert.Equal(t, cache0, cache1) - assert.Equal(t, []string{"testnamespace::cache.batch"}, cacheManager.ListKeys()) + assert.Equal(t, []string{"testnamespace:cache.batch"}, cacheManager.ListCacheNames("testnamespace")) cache2, _ := cacheManager.GetCache(NewCacheConfig(ctx, "cache.batch.limit", "cache.batch.ttl", "")) assert.NotEqual(t, cache0, cache2) - assert.Equal(t, 2, len(cacheManager.ListKeys())) + assert.Equal(t, 1, len(cacheManager.ListCacheNames("testnamespace"))) + assert.Equal(t, 1, len(cacheManager.ListCacheNames("global"))) } func TestTwoSeparateCacheWorksIndependently(t *testing.T) { diff --git a/internal/contracts/manager.go b/internal/contracts/manager.go index 28e050ff49..85ff63eba2 100644 --- a/internal/contracts/manager.go +++ b/internal/contracts/manager.go @@ -18,7 +18,11 @@ package contracts import ( "context" + "crypto/sha256" + "database/sql/driver" + "encoding/hex" "fmt" + "hash" "strings" "github.com/hyperledger/firefly-common/pkg/ffapi" @@ -27,6 +31,8 @@ import ( "github.com/hyperledger/firefly-common/pkg/log" "github.com/hyperledger/firefly/internal/batch" "github.com/hyperledger/firefly/internal/broadcast" + "github.com/hyperledger/firefly/internal/cache" + "github.com/hyperledger/firefly/internal/coreconfig" "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/database/sqlcommon" @@ -35,6 +41,7 @@ import ( "github.com/hyperledger/firefly/internal/privatemessaging" "github.com/hyperledger/firefly/internal/syncasync" "github.com/hyperledger/firefly/internal/txcommon" + "github.com/hyperledger/firefly/internal/txwriter" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" @@ -53,6 +60,7 @@ type Manager interface { GetFFIs(ctx context.Context, filter ffapi.AndFilter) ([]*fftypes.FFI, *ffapi.FilterResult, error) ResolveFFI(ctx context.Context, ffi *fftypes.FFI) error ResolveFFIReference(ctx context.Context, ref *fftypes.FFIReference) error + DeleteFFI(ctx context.Context, id *fftypes.UUID) error DeployContract(ctx context.Context, req *core.ContractDeployRequest, waitConfirm bool) (interface{}, error) InvokeContract(ctx context.Context, req *core.ContractCallRequest, waitConfirm bool) (interface{}, error) @@ -61,6 +69,7 @@ type Manager interface { GetContractAPIInterface(ctx context.Context, apiName string) (*fftypes.FFI, error) GetContractAPIs(ctx context.Context, httpServerURL string, filter ffapi.AndFilter) ([]*core.ContractAPI, *ffapi.FilterResult, error) ResolveContractAPI(ctx context.Context, httpServerURL string, api *core.ContractAPI) error + DeleteContractAPI(ctx context.Context, apiName string) error AddContractListener(ctx context.Context, listener *core.ContractListenerInput) (output *core.ContractListener, err error) AddContractAPIListener(ctx context.Context, apiName, eventPath string, listener *core.ContractListener) (output *core.ContractListener, err error) @@ -73,7 +82,7 @@ type Manager interface { // From operations.OperationHandler PrepareOperation(ctx 
context.Context, op *core.Operation) (*core.PreparedOperation, error) - RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) + RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) } type contractManager struct { @@ -84,15 +93,26 @@ type contractManager struct { messaging privatemessaging.Manager // optional batch batch.Manager // optional txHelper txcommon.Helper + txWriter txwriter.Writer identity identity.Manager blockchain blockchain.Plugin ffiParamValidator fftypes.FFIParamValidator operations operations.Manager syncasync syncasync.Bridge + methodCache cache.CInterface } -func NewContractManager(ctx context.Context, ns string, di database.Plugin, bi blockchain.Plugin, dm data.Manager, bm broadcast.Manager, pm privatemessaging.Manager, bp batch.Manager, im identity.Manager, om operations.Manager, txHelper txcommon.Helper, sa syncasync.Bridge) (Manager, error) { - if di == nil || im == nil || bi == nil || dm == nil || om == nil || txHelper == nil || sa == nil { +type methodCacheEntry struct { + method *fftypes.FFIMethod + errors []*fftypes.FFIError +} + +type schemaValidationEntry struct { + schema *jsonschema.Schema +} + +func NewContractManager(ctx context.Context, ns string, di database.Plugin, bi blockchain.Plugin, dm data.Manager, bm broadcast.Manager, pm privatemessaging.Manager, bp batch.Manager, im identity.Manager, om operations.Manager, txHelper txcommon.Helper, txWriter txwriter.Writer, sa syncasync.Bridge, cacheManager cache.Manager) (Manager, error) { + if di == nil || im == nil || bi == nil || dm == nil || om == nil || txHelper == nil || txWriter == nil || sa == nil || cacheManager == nil { return nil, i18n.NewError(ctx, coremsgs.MsgInitializationNilDepError, "ContractManager") } v, err := bi.GetFFIParamValidator(ctx) @@ -108,6 +128,7 @@ func NewContractManager(ctx context.Context, ns string, di database.Plugin, bi b messaging: pm, batch: bp, txHelper: txHelper, + txWriter: txWriter, identity: im, blockchain: bi, ffiParamValidator: v, @@ -115,6 +136,18 @@ func NewContractManager(ctx context.Context, ns string, di database.Plugin, bi b syncasync: sa, } + cm.methodCache, err = cacheManager.GetCache( + cache.NewCacheConfig( + ctx, + coreconfig.CacheMethodsLimit, + coreconfig.CacheMethodsTTL, + ns, + ), + ) + if err != nil { + return nil, err + } + om.RegisterHandler(ctx, cm, []core.OpType{ core.OpTypeBlockchainInvoke, core.OpTypeBlockchainContractDeploy, @@ -141,7 +174,13 @@ func (cm *contractManager) newFFISchemaCompiler() *jsonschema.Compiler { } func (cm *contractManager) GetFFI(ctx context.Context, name, version string) (*fftypes.FFI, error) { - return cm.database.GetFFI(ctx, cm.namespace, name, version) + ffi, err := cm.database.GetFFI(ctx, cm.namespace, name, version) + if err != nil { + return nil, err + } else if ffi == nil { + return nil, i18n.NewError(ctx, coremsgs.Msg404NotFound) + } + return ffi, nil } func (cm *contractManager) GetFFIWithChildren(ctx context.Context, name, version string) (*fftypes.FFI, error) { @@ -241,78 +280,82 @@ func (cm *contractManager) verifyListeners(ctx context.Context) error { } -func (cm *contractManager) writeInvokeTransaction(ctx context.Context, req *core.ContractCallRequest) (*core.Operation, error) { +func (cm *contractManager) writeInvokeTransaction(ctx context.Context, req *core.ContractCallRequest) (bool, *core.Operation, error) { txtype := core.TransactionTypeContractInvoke if req.Message != nil 
{ txtype = core.TransactionTypeContractInvokePin } - txid, err := cm.txHelper.SubmitNewTransaction(ctx, txtype, req.IdempotencyKey) - var op *core.Operation + op := core.NewOperation( + cm.blockchain, + cm.namespace, + nil, // assigned by txwriter + core.OpTypeBlockchainInvoke) + if err := addBlockchainReqInputs(op, req); err != nil { + return false, nil, err + } + + txn, err := cm.txWriter.WriteTransactionAndOps(ctx, txtype, req.IdempotencyKey, op) if err != nil { - var resubmitErr error // Check if we've clashed on idempotency key. There might be operations still in "Initialized" state that need // submitting to their handlers if idemErr, ok := err.(*sqlcommon.IdempotencyError); ok { - op, resubmitErr = cm.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) + // Note we don't need to worry about re-entering this code zero-ops in this case, as we write everything as a batch in WriteTransactionAndOps. + _, resubmitted, resubmitErr := cm.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) if resubmitErr != nil { // Error doing resubmit, return the new error err = resubmitErr - } else if op != nil { + } else if len(resubmitted) > 0 { // We successfully resubmitted an initialized operation, return the operation // and the idempotent error. The caller will revert the 409 to 2xx - err = idemErr + if req.Message != nil { + req.Message.Header.TxType = txtype + req.Message.TransactionID = idemErr.ExistingTXID + } + return true, resubmitted[0], nil // only one operation, return existing one } } - return op, err + return false, op, err } if req.Message != nil { req.Message.Header.TxType = txtype - req.Message.TransactionID = txid + req.Message.TransactionID = txn.ID } - op = core.NewOperation( + return false, op, err +} + +func (cm *contractManager) writeDeployTransaction(ctx context.Context, req *core.ContractDeployRequest) (bool, *core.Operation, error) { + + op := core.NewOperation( cm.blockchain, cm.namespace, - txid, - core.OpTypeBlockchainInvoke) - if err = addBlockchainReqInputs(op, req); err == nil { - err = cm.operations.AddOrReuseOperation(ctx, op) + nil, // assigned by txwriter + core.OpTypeBlockchainContractDeploy) + if err := addBlockchainReqInputs(op, req); err != nil { + return false, nil, err } - return op, err -} - -func (cm *contractManager) writeDeployTransaction(ctx context.Context, req *core.ContractDeployRequest) (*core.Operation, error) { - txid, err := cm.txHelper.SubmitNewTransaction(ctx, core.TransactionTypeContractDeploy, req.IdempotencyKey) - var op *core.Operation + _, err := cm.txWriter.WriteTransactionAndOps(ctx, core.TransactionTypeContractDeploy, req.IdempotencyKey, op) if err != nil { - var resubmitErr error // Check if we've clashed on idempotency key. There might be operations still in "Initialized" state that need // submitting to their handlers if idemErr, ok := err.(*sqlcommon.IdempotencyError); ok { - op, resubmitErr = cm.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) + // Note we don't need to worry about re-entering this code zero-ops in this case, as we write everything as a batch in WriteTransactionAndOps. + _, resubmitted, resubmitErr := cm.operations.ResubmitOperations(ctx, idemErr.ExistingTXID) if resubmitErr != nil { // Error doing resubmit, return the new error err = resubmitErr - } else if op != nil { + } else if len(resubmitted) > 0 { // We successfully resubmitted an initialized operation, return the operation // and the idempotent error. 
The caller will revert the 409 to 2xx - err = idemErr + return true, resubmitted[0], nil // only one operation, return existing one } } - return op, err + return false, op, err } - op = core.NewOperation( - cm.blockchain, - cm.namespace, - txid, - core.OpTypeBlockchainContractDeploy) - if err = addBlockchainReqInputs(op, req); err == nil { - err = cm.operations.AddOrReuseOperation(ctx, op) - } - return op, err + return false, op, err } func (cm *contractManager) DeployContract(ctx context.Context, req *core.ContractDeployRequest, waitConfirm bool) (res interface{}, err error) { @@ -321,24 +364,16 @@ func (cm *contractManager) DeployContract(ctx context.Context, req *core.Contrac return nil, err } - var op *core.Operation - err = cm.database.RunAsGroup(ctx, func(ctx context.Context) (err error) { - op, err = cm.writeDeployTransaction(ctx, req) - return err - }) + resubmit, op, err := cm.writeDeployTransaction(ctx, req) if err != nil { - if _, ok := err.(*sqlcommon.IdempotencyError); ok { - if op != nil { - // Idempotency key clash but we resubmitted an initialized operation? Return 20x, not 409 - return op, nil - } - } - // Any other error? Return the error unchanged return nil, err } + if resubmit { + return op, nil // nothing more to do + } send := func(ctx context.Context) error { - _, err := cm.operations.RunOperation(ctx, opBlockchainContractDeploy(op, req)) + _, err := cm.operations.RunOperation(ctx, opBlockchainContractDeploy(op, req), req.IdempotencyKey != "") return err } if waitConfirm { @@ -366,39 +401,31 @@ func (cm *contractManager) InvokeContract(ctx context.Context, req *core.Contrac } } - var op *core.Operation - err = cm.database.RunAsGroup(ctx, func(ctx context.Context) (err error) { - if err = cm.resolveInvokeContractRequest(ctx, req); err != nil { - return err - } - if err := cm.validateInvokeContractRequest(ctx, req); err != nil { - return err - } - if msgSender != nil { - if err = msgSender.Prepare(ctx); err != nil { - return err - } - // Clear inline data now that it's been resolved - // (so as not to store all data values on the operation inputs) - req.Message.InlineData = nil + if err := cm.resolveInvokeContractRequest(ctx, req); err != nil { + return nil, err + } + bcParsedMethod, err := cm.validateInvokeContractRequest(ctx, req, true) + if err != nil { + return nil, err + } + if msgSender != nil { + if err := msgSender.Prepare(ctx); err != nil { + return nil, err } - if req.Type == core.CallTypeInvoke { - op, err = cm.writeInvokeTransaction(ctx, req) - if err != nil { - return err - } + // Clear inline data now that it's been resolved + // (so as not to store all data values on the operation inputs) + req.Message.InlineData = nil + } + var op *core.Operation + var resubmit bool + if req.Type == core.CallTypeInvoke { + resubmit, op, err = cm.writeInvokeTransaction(ctx, req) + if err != nil { + return nil, err } - return nil - }) - if err != nil { - if _, ok := err.(*sqlcommon.IdempotencyError); ok { - if op != nil { - // Idempotency key clash but we resubmitted an initialized operation? Return 20x, not 409 - return op, nil - } + if resubmit { + return op, nil } - // Any other error? 
Return the error unchanged - return nil, err } switch req.Type { @@ -410,7 +437,7 @@ func (cm *contractManager) InvokeContract(ctx context.Context, req *core.Contrac return op, msgSender.Send(ctx) } send := func(ctx context.Context) error { - _, err := cm.operations.RunOperation(ctx, txcommon.OpBlockchainInvoke(op, req, nil)) + _, err := cm.operations.RunOperation(ctx, txcommon.OpBlockchainInvoke(op, req, nil), req.IdempotencyKey != "") return err } if waitConfirm { @@ -419,7 +446,7 @@ func (cm *contractManager) InvokeContract(ctx context.Context, req *core.Contrac return op, send(ctx) case core.CallTypeQuery: - return cm.blockchain.QueryContract(ctx, req.Key, req.Location, req.Method, req.Input, req.Errors, req.Options) + return cm.blockchain.QueryContract(ctx, req.Key, req.Location, bcParsedMethod, req.Input, req.Options) default: panic(fmt.Sprintf("unknown call type: %s", req.Type)) @@ -446,6 +473,14 @@ func (cm *contractManager) resolveInvokeContractRequest(ctx context.Context, req if req.MethodPath == "" || req.Interface == nil { return i18n.NewError(ctx, coremsgs.MsgContractMethodNotSet) } + cacheKey := fmt.Sprintf("method_%s_%s", req.Interface, req.MethodPath) + cached := cm.methodCache.Get(cacheKey) + if cached != nil { + cMethodDetails := cached.(*methodCacheEntry) + req.Method = cMethodDetails.method + req.Errors = cMethodDetails.errors + return nil + } req.Method, err = cm.database.GetFFIMethod(ctx, cm.namespace, req.Interface, req.MethodPath) if err != nil || req.Method == nil { return i18n.NewError(ctx, coremsgs.MsgContractMethodResolveError, err) @@ -455,6 +490,10 @@ func (cm *contractManager) resolveInvokeContractRequest(ctx context.Context, req if err != nil { return i18n.NewError(ctx, coremsgs.MsgContractErrorsResolveError, err) } + cm.methodCache.Set(cacheKey, &methodCacheEntry{ + method: req.Method, + errors: req.Errors, + }) } return nil } @@ -462,7 +501,8 @@ func (cm *contractManager) resolveInvokeContractRequest(ctx context.Context, req func (cm *contractManager) addContractURLs(httpServerURL string, api *core.ContractAPI) { if api != nil { // These URLs must match the actual routes in apiserver.createMuxRouter()! 
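+		// Illustrative example (hypothetical values): with the namespaced server URL
+		// "https://firefly.example.com/api/v1/namespaces/ns1" and an API named "banana",
+		// the generated URLs are ".../namespaces/ns1/apis/banana/api/swagger.json" for
+		// OpenAPI and ".../namespaces/ns1/apis/banana/api" for the UI.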
- baseURL := fmt.Sprintf("%s/namespaces/%s/apis/%s", httpServerURL, cm.namespace, api.Name) + // Note the httpServerURL includes the namespace + baseURL := fmt.Sprintf("%s/apis/%s", httpServerURL, api.Name) api.URLs.OpenAPI = baseURL + "/api/swagger.json" api.URLs.UI = baseURL + "/api" } @@ -491,6 +531,10 @@ func (cm *contractManager) GetContractAPIs(ctx context.Context, httpServerURL st } func (cm *contractManager) ResolveContractAPI(ctx context.Context, httpServerURL string, api *core.ContractAPI) (err error) { + if err := api.Validate(ctx); err != nil { + return err + } + if api.Location != nil { if api.Location, err = cm.blockchain.NormalizeContractLocation(ctx, blockchain.NormalizeCall, api.Location); err != nil { return err @@ -562,21 +606,16 @@ func (cm *contractManager) uniquePathName(name string, usedNames map[string]bool } func (cm *contractManager) ResolveFFI(ctx context.Context, ffi *fftypes.FFI) error { - if err := ffi.Validate(ctx, false); err != nil { + if err := ffi.Validate(ctx); err != nil { return err } - existing, err := cm.database.GetFFI(ctx, cm.namespace, ffi.Name, ffi.Version) - if existing != nil && err == nil { - return i18n.NewError(ctx, coremsgs.MsgContractInterfaceExists, ffi.Namespace, ffi.Name, ffi.Version) - } - methodPathNames := map[string]bool{} for _, method := range ffi.Methods { method.Interface = ffi.ID method.Namespace = ffi.Namespace method.Pathname = cm.uniquePathName(method.Name, methodPathNames) - if err := cm.validateFFIMethod(ctx, method); err != nil { + if _, _, err := cm.validateFFIMethod(ctx, method); err != nil { return err } } @@ -596,39 +635,59 @@ func (cm *contractManager) ResolveFFI(ctx context.Context, ffi *fftypes.FFI) err errorDef.Interface = ffi.ID errorDef.Namespace = ffi.Namespace errorDef.Pathname = cm.uniquePathName(errorDef.Name, errorPathNames) - if err := cm.validateFFIError(ctx, &errorDef.FFIErrorDefinition); err != nil { + if _, err := cm.validateFFIError(ctx, &errorDef.FFIErrorDefinition); err != nil { return err } } return nil } -func (cm *contractManager) validateFFIMethod(ctx context.Context, method *fftypes.FFIMethod) error { +func (cm *contractManager) validateFFIMethod(ctx context.Context, method *fftypes.FFIMethod) (hash.Hash, map[string]*jsonschema.Schema, error) { if method.Name == "" { - return i18n.NewError(ctx, coremsgs.MsgMethodNameMustBeSet) + return nil, nil, i18n.NewError(ctx, coremsgs.MsgMethodNameMustBeSet) } + paramSchemas := make(map[string]*jsonschema.Schema) + paramUniqueHash := sha256.New() for _, param := range method.Params { - if err := cm.validateFFIParam(ctx, param); err != nil { - return err + paramSchemaHash, schema, err := cm.validateFFIParam(ctx, param) + if err != nil { + return nil, nil, err } + paramSchemas[param.Name] = schema + // The input parsing is dependent on the parameter name, so it's important those are included in the hash + paramUniqueHash.Write([]byte(param.Name)) + paramUniqueHash.Write([]byte(paramSchemaHash)) } for _, param := range method.Returns { - if err := cm.validateFFIParam(ctx, param); err != nil { - return err + returnHash, _, err := cm.validateFFIParam(ctx, param) + if err != nil { + return nil, nil, err } + paramUniqueHash.Write([]byte(param.Name)) + paramUniqueHash.Write([]byte(returnHash)) } - return nil + return paramUniqueHash, paramSchemas, nil } -func (cm *contractManager) validateFFIParam(ctx context.Context, param *fftypes.FFIParam) error { +func (cm *contractManager) validateFFIParam(ctx context.Context, param *fftypes.FFIParam) (string, 
*jsonschema.Schema, error) { + schemaString := param.Schema.String() + schemaHash := hex.EncodeToString(fftypes.HashString(schemaString)[:]) + cacheKey := fmt.Sprintf("schema_%s", schemaHash) + cached := cm.methodCache.Get(cacheKey) + if cached != nil { + // Cached validation result + return schemaHash, cached.(*schemaValidationEntry).schema, nil + } c := cm.newFFISchemaCompiler() - if err := c.AddResource(param.Name, strings.NewReader(param.Schema.String())); err != nil { - return i18n.WrapError(ctx, err, coremsgs.MsgFFISchemaParseFail, param.Name) + if err := c.AddResource(param.Name, strings.NewReader(schemaString)); err != nil { + return "", nil, i18n.WrapError(ctx, err, coremsgs.MsgFFISchemaParseFail, param.Name) } - if _, err := c.Compile(param.Name); err != nil { - return i18n.WrapError(ctx, err, coremsgs.MsgFFISchemaCompileFail, param.Name) + schema, err := c.Compile(param.Name) + if err != nil || schema == nil { + return "", nil, i18n.WrapError(ctx, err, coremsgs.MsgFFISchemaCompileFail, param.Name) } - return nil + cm.methodCache.Set(cacheKey, &schemaValidationEntry{schema: schema}) + return schemaHash, schema, nil } func (cm *contractManager) validateFFIEvent(ctx context.Context, event *fftypes.FFIEventDefinition) error { @@ -636,28 +695,40 @@ func (cm *contractManager) validateFFIEvent(ctx context.Context, event *fftypes. return i18n.NewError(ctx, coremsgs.MsgEventNameMustBeSet) } for _, param := range event.Params { - if err := cm.validateFFIParam(ctx, param); err != nil { + if _, _, err := cm.validateFFIParam(ctx, param); err != nil { return err } } return nil } -func (cm *contractManager) validateFFIError(ctx context.Context, errorDef *fftypes.FFIErrorDefinition) error { +func (cm *contractManager) validateFFIError(ctx context.Context, errorDef *fftypes.FFIErrorDefinition) (string, error) { if errorDef.Name == "" { - return i18n.NewError(ctx, coremsgs.MsgErrorNameMustBeSet) + return "", i18n.NewError(ctx, coremsgs.MsgErrorNameMustBeSet) } + cacheKeyBuff := new(strings.Builder) // Build a big string of aggregate hashes for _, param := range errorDef.Params { - if err := cm.validateFFIParam(ctx, param); err != nil { - return err + paramCacheKey, _, err := cm.validateFFIParam(ctx, param) + if err != nil { + return "", err } + cacheKeyBuff.WriteString(param.Name) + cacheKeyBuff.WriteString(paramCacheKey) } - return nil + return cacheKeyBuff.String(), nil } -func (cm *contractManager) validateInvokeContractRequest(ctx context.Context, req *core.ContractCallRequest) error { - if err := cm.validateFFIMethod(ctx, req.Method); err != nil { - return err +func (cm *contractManager) validateInvokeContractRequest(ctx context.Context, req *core.ContractCallRequest, blockchainValidation bool) (interface{}, error) { + paramUniqueHash, paramSchemas, err := cm.validateFFIMethod(ctx, req.Method) + if err != nil { + return nil, err + } + for _, errDef := range req.Errors { + errorCacheKey, err := cm.validateFFIError(ctx, &errDef.FFIErrorDefinition) + if err != nil { + return nil, err + } + paramUniqueHash.Write([]byte(errorCacheKey)) } // Validate that all parameters are specified and are of reasonable JSON types to match the FFI @@ -667,27 +738,46 @@ func (cm *contractManager) validateInvokeContractRequest(ctx context.Context, re // (assume it will be used for sending the batch pin) lastIndex-- if lastIndex < 0 { - return i18n.NewError(ctx, coremsgs.MsgMethodDoesNotSupportPinning) + return nil, i18n.NewError(ctx, coremsgs.MsgMethodDoesNotSupportPinning) } // Also verify that the user didn't 
pass in a value for this last parameter lastParam := req.Method.Params[lastIndex] if _, ok := req.Input[lastParam.Name]; ok { - return i18n.NewError(ctx, coremsgs.MsgCannotSetParameterWithMessage, lastParam.Name) + return nil, i18n.NewError(ctx, coremsgs.MsgCannotSetParameterWithMessage, lastParam.Name) } } for _, param := range req.Method.Params[:lastIndex] { - value, ok := req.Input[param.Name] - if !ok { - return i18n.NewError(ctx, coremsgs.MsgContractMissingInputArgument, param.Name) + schema, schemaOk := paramSchemas[param.Name] + value, valueOk := req.Input[param.Name] + if !valueOk || !schemaOk { + return nil, i18n.NewError(ctx, coremsgs.MsgContractMissingInputArgument, param.Name) } - if err := cm.checkParamSchema(ctx, value, param); err != nil { - return err + if err := cm.checkParamSchema(ctx, param.Name, value, schema); err != nil { + return nil, err } } - // Allow the blockchain plugin to perform additional blockchain-specific parameter validation - return cm.blockchain.ValidateInvokeRequest(ctx, req.Method, req.Input, req.Errors, req.Message != nil) + // Now we need to ask the blockchain connector to do its own validation of the FFI. + // The parsed result is cached under the aggregate cache key we just built. + cacheKey := "methodhash_" + req.Method.Name + "_" + hex.EncodeToString(paramUniqueHash.Sum(nil)) + bcParsedMethod := cm.methodCache.Get(cacheKey) + cacheMiss := bcParsedMethod == nil + if cacheMiss { + bcParsedMethod, err = cm.blockchain.ParseInterface(ctx, req.Method, req.Errors) + if err != nil { + return nil, err + } + cm.methodCache.Set(cacheKey, bcParsedMethod) + } + log.L(ctx).Debugf("Validating method '%s' (cacheMiss=%t)", cacheKey, cacheMiss) + + if blockchainValidation { + // Allow the blockchain plugin to perform additional blockchain-specific parameter validation. + // We only do this on the API call on the way in, not when this function is called again later as part of the operation. + return bcParsedMethod, cm.blockchain.ValidateInvokeRequest(ctx, bcParsedMethod, req.Input, req.Message != nil) + } + return bcParsedMethod, nil } func (cm *contractManager) resolveEvent(ctx context.Context, ffi *fftypes.FFIReference, eventPath string) (*core.FFISerializedEvent, error) { @@ -769,10 +859,17 @@ func (cm *contractManager) AddContractListener(ctx context.Context, listener *co // Namespace + Topic + Location + Signature must be unique listener.Signature = cm.blockchain.GenerateEventSignature(ctx, &listener.Event.FFIEventDefinition) + // Above we only call NormalizeContractLocation if the location is non-nil, which means + // an unset location is still nil at this point. Querying with a nil fftypes.JSONAny does + // not yield the right result, so we need to query for an explicit nil value. + var locationLookup driver.Value = nil + if !listener.Location.IsNil() { + locationLookup = listener.Location.String() + } fb := database.ContractListenerQueryFactory.NewFilter(ctx) if existing, _, err := cm.database.GetContractListeners(ctx, cm.namespace, fb.And( fb.Eq("topic", listener.Topic), - fb.Eq("location", listener.Location.String()), + fb.Eq("location", locationLookup), fb.Eq("signature", listener.Signature), )); err != nil { return err @@ -897,26 +994,26 @@ func (cm *contractManager) DeleteContractListenerByNameOrID(ctx context.Context, }) } -func (cm *contractManager) checkParamSchema(ctx context.Context, input interface{}, param *fftypes.FFIParam) error { - // TODO: Cache the compiled schema?
- c := jsonschema.NewCompiler() - err := c.AddResource(param.Name, strings.NewReader(param.Schema.String())) - if err != nil { - return i18n.WrapError(ctx, err, coremsgs.MsgFFISchemaParseFail, param.Name) - } - schema, err := c.Compile(param.Name) - if err != nil { - return i18n.WrapError(ctx, err, coremsgs.MsgFFIValidationFail, param.Name, param.Schema) - } +func (cm *contractManager) checkParamSchema(ctx context.Context, name string, input interface{}, schema *jsonschema.Schema) error { if err := schema.Validate(input); err != nil { - return i18n.WrapError(ctx, err, coremsgs.MsgFFIValidationFail, param.Name) + return i18n.WrapError(ctx, err, coremsgs.MsgFFIValidationFail, name) } return nil } func (cm *contractManager) GenerateFFI(ctx context.Context, generationRequest *fftypes.FFIGenerationRequest) (*fftypes.FFI, error) { generationRequest.Namespace = cm.namespace - return cm.blockchain.GenerateFFI(ctx, generationRequest) + if generationRequest.Name == "" { + generationRequest.Name = "generated" + } + if generationRequest.Version == "" { + generationRequest.Version = "0.0.1" + } + ffi, err := cm.blockchain.GenerateFFI(ctx, generationRequest) + if err == nil { + err = cm.ResolveFFI(ctx, ffi) + } + return ffi, err } func (cm *contractManager) getDefaultContractListenerOptions() *core.ContractListenerOptions { @@ -948,3 +1045,35 @@ func (cm *contractManager) buildInvokeMessage(ctx context.Context, in *core.Mess return nil, i18n.NewError(ctx, coremsgs.MsgInvalidMessageType, allowedTypes) } } + +func (cm *contractManager) DeleteFFI(ctx context.Context, id *fftypes.UUID) error { + return cm.database.RunAsGroup(ctx, func(ctx context.Context) error { + ffi, err := cm.GetFFIByID(ctx, id) + if err != nil { + return err + } + if ffi == nil { + return i18n.NewError(ctx, coremsgs.Msg404NotFound) + } + if ffi.Published { + return i18n.NewError(ctx, coremsgs.MsgCannotDeletePublished) + } + return cm.database.DeleteFFI(ctx, cm.namespace, id) + }) +} + +func (cm *contractManager) DeleteContractAPI(ctx context.Context, apiName string) error { + return cm.database.RunAsGroup(ctx, func(ctx context.Context) error { + api, err := cm.GetContractAPI(ctx, "", apiName) + if err != nil { + return err + } + if api == nil { + return i18n.NewError(ctx, coremsgs.Msg404NotFound) + } + if api.Published { + return i18n.NewError(ctx, coremsgs.MsgCannotDeletePublished) + } + return cm.database.DeleteContractAPI(ctx, cm.namespace, api.ID) + }) +} diff --git a/internal/contracts/manager_test.go b/internal/contracts/manager_test.go index eb20ba4cf9..304d05933d 100644 --- a/internal/contracts/manager_test.go +++ b/internal/contracts/manager_test.go @@ -18,6 +18,8 @@ package contracts import ( "context" + "encoding/hex" + "encoding/json" "errors" "fmt" "strings" @@ -45,6 +47,7 @@ import ( "github.com/hyperledger/firefly/mocks/privatemessagingmocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" + "github.com/hyperledger/firefly/mocks/txwritermocks" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" @@ -62,6 +65,7 @@ func newTestContractManager() *contractManager { mim := &identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} mom := &operationmocks.Manager{} + txw := &txwritermocks.Writer{} ctx := context.Background() cmi := &cachemocks.Manager{} cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) @@ -79,13 +83,13 @@ func 
newTestContractManager() *contractManager { a[1].(func(context.Context) error)(a[0].(context.Context)), } } - cm, _ := NewContractManager(context.Background(), "ns1", mdi, mbi, mdm, mbm, mpm, mbp, mim, mom, txHelper, msa) + cm, _ := NewContractManager(context.Background(), "ns1", mdi, mbi, mdm, mbm, mpm, mbp, mim, mom, txHelper, txw, msa, cmi) cm.(*contractManager).txHelper = &txcommonmocks.Helper{} return cm.(*contractManager) } func TestNewContractManagerFail(t *testing.T) { - _, err := NewContractManager(context.Background(), "", nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + _, err := NewContractManager(context.Background(), "", nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", err) } @@ -103,13 +107,33 @@ func TestNewContractManagerFFISchemaLoaderFail(t *testing.T) { mim := &identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} mom := &operationmocks.Manager{} + txw := &txwritermocks.Writer{} ctx := context.Background() cmi := &cachemocks.Manager{} cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) txHelper, _ := txcommon.NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) msa := &syncasyncmocks.Bridge{} mbi.On("GetFFIParamValidator", mock.Anything).Return(nil, fmt.Errorf("pop")) - _, err := NewContractManager(context.Background(), "ns1", mdi, mbi, mdm, mbm, mpm, mbp, mim, mom, txHelper, msa) + _, err := NewContractManager(context.Background(), "ns1", mdi, mbi, mdm, mbm, mpm, mbp, mim, mom, txHelper, txw, msa, cmi) + assert.Regexp(t, "pop", err) +} + +func TestNewContractManagerCacheConfigFail(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + mbm := &broadcastmocks.Manager{} + mpm := &privatemessagingmocks.Manager{} + mbp := &batchmocks.Manager{} + mim := &identitymanagermocks.Manager{} + mbi := &blockchainmocks.Plugin{} + mom := &operationmocks.Manager{} + txw := &txwritermocks.Writer{} + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(nil, fmt.Errorf("pop")) + txHelper := &txcommonmocks.Helper{} + msa := &syncasyncmocks.Bridge{} + mbi.On("GetFFIParamValidator", mock.Anything).Return(nil, nil) + _, err := NewContractManager(context.Background(), "ns1", mdi, mbi, mdm, mbm, mpm, mbp, mim, mom, txHelper, txw, msa, cmi) assert.Regexp(t, "pop", err) } @@ -122,6 +146,7 @@ func TestNewContractManagerFFISchemaLoader(t *testing.T) { mim := &identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} mom := &operationmocks.Manager{} + txw := &txwritermocks.Writer{} ctx := context.Background() cmi := &cachemocks.Manager{} cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) @@ -130,7 +155,7 @@ func TestNewContractManagerFFISchemaLoader(t *testing.T) { mdi.On("GetContractListeners", mock.Anything, "ns1", mock.Anything).Return(nil, nil, nil) mbi.On("GetFFIParamValidator", mock.Anything).Return(&ffi2abi.ParamValidator{}, nil) mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) - _, err := NewContractManager(context.Background(), "ns1", mdi, mbi, mdm, mbm, mpm, mbp, mim, mom, txHelper, msa) + _, err := NewContractManager(context.Background(), "ns1", mdi, mbi, mdm, mbm, mpm, mbp, mim, mom, txHelper, txw, msa, cmi) assert.NoError(t, err) } @@ -172,9 +197,6 @@ func TestResolveFFI(t *testing.T) { func TestBroadcastFFIInvalid(t *testing.T) { cm := newTestContractManager() - mdb := cm.database.(*databasemocks.Plugin) - - mdb.On("GetFFI", mock.Anything, "ns1", "test", "1.0.0").Return(nil, nil) 
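+	// No GetFFI mock is needed any more: ResolveFFI no longer queries the database for a
+	// clashing name/version (that check was removed along with the MsgContractInterfaceExists error).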
ffi := &fftypes.FFI{ Namespace: "ns1", @@ -196,37 +218,6 @@ func TestBroadcastFFIInvalid(t *testing.T) { err := cm.ResolveFFI(context.Background(), ffi) assert.Regexp(t, "does not validate", err) - - mdb.AssertExpectations(t) -} - -func TestResolveFFIExists(t *testing.T) { - cm := newTestContractManager() - mdb := cm.database.(*databasemocks.Plugin) - - mdb.On("GetFFI", mock.Anything, "ns1", "test", "1.0.0").Return(&fftypes.FFI{}, nil) - - ffi := &fftypes.FFI{ - Namespace: "ns1", - Name: "test", - Version: "1.0.0", - ID: fftypes.NewUUID(), - Methods: []*fftypes.FFIMethod{ - { - Name: "sum", - }, - }, - Events: []*fftypes.FFIEvent{ - { - FFIEventDefinition: fftypes.FFIEventDefinition{ - Name: "changed", - }, - }, - }, - } - - err := cm.ResolveFFI(context.Background(), ffi) - assert.Regexp(t, "FF10302", err) } func TestValidateInvokeContractRequest(t *testing.T) { @@ -259,9 +250,11 @@ func TestValidateInvokeContractRequest(t *testing.T) { } mbi := cm.blockchain.(*blockchainmocks.Plugin) - mbi.On("ValidateInvokeRequest", context.Background(), req.Method, req.Input, req.Errors, false).Return(nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", context.Background(), opaqueData, req.Input, false).Return(nil) - err := cm.validateInvokeContractRequest(context.Background(), req) + _, err := cm.validateInvokeContractRequest(context.Background(), req, true) assert.NoError(t, err) mbi.AssertExpectations(t) @@ -290,11 +283,24 @@ func TestValidateInvokeContractRequestMissingInput(t *testing.T) { }, }, }, + Errors: []*fftypes.FFIError{ + { + FFIErrorDefinition: fftypes.FFIErrorDefinition{ + Name: "u", + Params: fftypes.FFIParams{ + { + Name: "t", + Schema: fftypes.JSONAnyPtr(`{"type": "integer", "details": {"type": "uint256"}}`), + }, + }, + }, + }, + }, Input: map[string]interface{}{ "x": float64(1), }, } - err := cm.validateInvokeContractRequest(context.Background(), req) + _, err := cm.validateInvokeContractRequest(context.Background(), req, true) assert.Regexp(t, "Missing required input argument 'y'", err) } @@ -326,10 +332,55 @@ func TestValidateInvokeContractRequestInputWrongType(t *testing.T) { "y": "two", }, } - err := cm.validateInvokeContractRequest(context.Background(), req) + _, err := cm.validateInvokeContractRequest(context.Background(), req, true) assert.Regexp(t, "expected integer, but got string", err) } +func TestValidateInvokeContractRequestErrorInvalid(t *testing.T) { + cm := newTestContractManager() + req := &core.ContractCallRequest{ + Type: core.CallTypeInvoke, + Method: &fftypes.FFIMethod{ + Name: "sum", + Params: []*fftypes.FFIParam{ + { + Name: "x", + Schema: fftypes.JSONAnyPtr(`{"type": "integer", "details": {"type": "uint256"}}`), + }, + { + Name: "y", + Schema: fftypes.JSONAnyPtr(`{"type": "integer", "details": {"type": "uint256"}}`), + }, + }, + Returns: []*fftypes.FFIParam{ + { + Name: "z", + Schema: fftypes.JSONAnyPtr(`{"type": "integer", "details": {"type": "uint256"}}`), + }, + }, + }, + Errors: []*fftypes.FFIError{ + { + FFIErrorDefinition: fftypes.FFIErrorDefinition{ + Name: "u", + Params: fftypes.FFIParams{ + { + Name: "t", + Schema: fftypes.JSONAnyPtr(`{"type": "wrong"}`), + }, + }, + }, + }, + }, + Input: map[string]interface{}{ + "x": float64(1), + "y": "two", + }, + } + _, err := cm.validateInvokeContractRequest(context.Background(), req, true) + assert.Regexp(t, "FF10333", err) +} + func TestValidateInvokeContractRequestInvalidParam(t *testing.T) { cm := 
newTestContractManager() req := &core.ContractCallRequest{ @@ -359,7 +410,7 @@ func TestValidateInvokeContractRequestInvalidParam(t *testing.T) { }, } - err := cm.validateInvokeContractRequest(context.Background(), req) + _, err := cm.validateInvokeContractRequest(context.Background(), req, true) assert.Regexp(t, "does not validate", err) } @@ -392,7 +443,7 @@ func TestValidateInvokeContractRequestInvalidReturn(t *testing.T) { }, } - err := cm.validateInvokeContractRequest(context.Background(), req) + _, err := cm.validateInvokeContractRequest(context.Background(), req, true) assert.Regexp(t, "does not validate", err) } @@ -737,6 +788,46 @@ func TestAddContractListenerInline(t *testing.T) { mdi.AssertExpectations(t) } +func TestAddContractListenerInlineNilLocation(t *testing.T) { + cm := newTestContractManager() + mbi := cm.blockchain.(*blockchainmocks.Plugin) + mdi := cm.database.(*databasemocks.Plugin) + + sub := &core.ContractListenerInput{ + ContractListener: core.ContractListener{ + Event: &core.FFISerializedEvent{ + FFIEventDefinition: fftypes.FFIEventDefinition{ + Name: "changed", + Params: fftypes.FFIParams{ + { + Name: "value", + Schema: fftypes.JSONAnyPtr(`{"type": "integer"}`), + }, + }, + }, + }, + Options: &core.ContractListenerOptions{}, + Topic: "test-topic", + }, + } + + mbi.On("GenerateEventSignature", context.Background(), mock.Anything).Return("changed") + mdi.On("GetContractListeners", context.Background(), "ns1", mock.Anything).Return(nil, nil, nil) + mbi.On("AddContractListener", context.Background(), mock.MatchedBy(func(cl *core.ContractListener) bool { + // Normalize is not called for this case + return cl.Location == nil + })).Return(nil) + mdi.On("InsertContractListener", context.Background(), &sub.ContractListener).Return(nil) + + result, err := cm.AddContractListener(context.Background(), sub) + assert.NoError(t, err) + assert.NotNil(t, result.ID) + assert.NotNil(t, result.Event) + + mbi.AssertExpectations(t) + mdi.AssertExpectations(t) +} + func TestAddContractListenerNoLocationOK(t *testing.T) { cm := newTestContractManager() mbi := cm.blockchain.(*blockchainmocks.Plugin) @@ -1431,6 +1522,22 @@ func TestGetFFI(t *testing.T) { assert.NoError(t, err) } +func TestGetFFINotFound(t *testing.T) { + cm := newTestContractManager() + mdb := cm.database.(*databasemocks.Plugin) + mdb.On("GetFFI", mock.Anything, "ns1", "ffi", "v1.0.0").Return(nil, nil) + _, err := cm.GetFFI(context.Background(), "ffi", "v1.0.0") + assert.Regexp(t, "FF10109", err) +} + +func TestGetFFIFail(t *testing.T) { + cm := newTestContractManager() + mdb := cm.database.(*databasemocks.Plugin) + mdb.On("GetFFI", mock.Anything, "ns1", "ffi", "v1.0.0").Return(nil, fmt.Errorf("pop")) + _, err := cm.GetFFI(context.Background(), "ffi", "v1.0.0") + assert.EqualError(t, err, "pop") +} + func TestGetFFIWithChildren(t *testing.T) { cm := newTestContractManager() mdb := cm.database.(*databasemocks.Plugin) @@ -1602,6 +1709,7 @@ func TestDeployContract(t *testing.T) { mdi := cm.database.(*databasemocks.Plugin) mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) + txw := cm.txWriter.(*txwritermocks.Writer) signingKey := "0x2468" req := &core.ContractDeployRequest{ Key: signingKey, @@ -1611,15 +1719,14 @@ func TestDeployContract(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), nil) - mim.On("ResolveInputSigningKey", mock.Anything, signingKey, 
identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mom.On("AddOrReuseOperation", mock.Anything, mock.MatchedBy(func(op *core.Operation) bool { + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainContractDeploy && op.Plugin == "mockblockchain" - })).Return(nil) + })).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil) + mim.On("ResolveInputSigningKey", mock.Anything, signingKey, identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(blockchainContractDeployData) return op.Type == core.OpTypeBlockchainContractDeploy && data.Request == req - })).Return(nil, nil) + }), true).Return(nil, nil) _, err := cm.DeployContract(context.Background(), req, false) @@ -1637,6 +1744,7 @@ func TestDeployContractIdempotentResubmitOperation(t *testing.T) { mim := cm.identity.(*identitymanagermocks.Manager) mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) + txw := cm.txWriter.(*txwritermocks.Writer) signingKey := "0x2468" req := &core.ContractDeployRequest{ Key: signingKey, @@ -1646,10 +1754,12 @@ func TestDeployContractIdempotentResubmitOperation(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { + return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainContractDeploy && op.Plugin == "mockblockchain" + })).Return(nil, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(&core.Operation{}, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1, []*core.Operation{{}}, nil) mim.On("ResolveInputSigningKey", mock.Anything, signingKey, identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) // If ResubmitOperations returns an operation it's because it found one to resubmit, so we return 2xx not 409, and don't expect an error @@ -1667,6 +1777,7 @@ func TestDeployContractIdempotentNoOperationToResubmit(t *testing.T) { mim := cm.identity.(*identitymanagermocks.Manager) mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) + txw := cm.txWriter.(*txwritermocks.Writer) signingKey := "0x2468" req := &core.ContractDeployRequest{ Key: signingKey, @@ -1676,10 +1787,12 @@ func TestDeployContractIdempotentNoOperationToResubmit(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { + return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainContractDeploy && op.Plugin == "mockblockchain" + })).Return(nil, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: 
i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1 /* total */, nil /* to resubmit */, nil) mim.On("ResolveInputSigningKey", mock.Anything, signingKey, identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) // If ResubmitOperations returns nil it's because there was no operation in initialized state, so we expect the regular 409 error back @@ -1698,6 +1811,7 @@ func TestDeployContractIdempotentErrorOnOperationResubmit(t *testing.T) { mim := cm.identity.(*identitymanagermocks.Manager) mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) + txw := cm.txWriter.(*txwritermocks.Writer) signingKey := "0x2468" req := &core.ContractDeployRequest{ Key: signingKey, @@ -1707,10 +1821,12 @@ func TestDeployContractIdempotentErrorOnOperationResubmit(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { + return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainContractDeploy && op.Plugin == "mockblockchain" + })).Return(nil, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, fmt.Errorf("pop")) + mom.On("ResubmitOperations", context.Background(), id).Return(-1, nil, fmt.Errorf("pop")) mim.On("ResolveInputSigningKey", mock.Anything, signingKey, identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) _, err := cm.DeployContract(context.Background(), req, false) @@ -1729,6 +1845,7 @@ func TestDeployContractSync(t *testing.T) { mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) sam := cm.syncasync.(*syncasyncmocks.Bridge) + txw := cm.txWriter.(*txwritermocks.Writer) signingKey := "0x2468" req := &core.ContractDeployRequest{ Key: signingKey, @@ -1738,11 +1855,10 @@ func TestDeployContractSync(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), nil) - mim.On("ResolveInputSigningKey", mock.Anything, signingKey, identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mom.On("AddOrReuseOperation", mock.Anything, mock.MatchedBy(func(op *core.Operation) bool { + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainContractDeploy && op.Plugin == "mockblockchain" - })).Return(nil) + })).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil) + mim.On("ResolveInputSigningKey", mock.Anything, signingKey, identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) sam.On("WaitForDeployOperation", mock.Anything, mock.Anything, mock.Anything).Return(&core.Operation{Status: core.OpStatusSucceeded}, nil) @@ -1787,6 +1903,7 @@ func TestDeployContractSubmitNewTransactionFail(t *testing.T) { mdi := 
cm.database.(*databasemocks.Plugin) mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) + txw := cm.txWriter.(*txwritermocks.Writer) signingKey := "0x2468" req := &core.ContractDeployRequest{ Key: signingKey, @@ -1796,7 +1913,9 @@ func TestDeployContractSubmitNewTransactionFail(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1")).Return(nil, errors.New("pop")) + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractDeploy, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { + return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainContractDeploy && op.Plugin == "mockblockchain" + })).Return(nil, errors.New("pop")) mim.On("ResolveInputSigningKey", mock.Anything, signingKey, identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) _, err := cm.DeployContract(context.Background(), req, false) @@ -1816,6 +1935,7 @@ func TestInvokeContract(t *testing.T) { mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) mbi := cm.blockchain.(*blockchainmocks.Plugin) + txw := cm.txWriter.(*txwritermocks.Writer) req := &core.ContractCallRequest{ Type: core.CallTypeInvoke, @@ -1830,16 +1950,68 @@ func TestInvokeContract(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), nil) + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { + return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" + })).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil) mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mom.On("AddOrReuseOperation", mock.Anything, mock.MatchedBy(func(op *core.Operation) bool { + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { + data := op.Data.(txcommon.BlockchainInvokeData) + return op.Type == core.OpTypeBlockchainInvoke && data.Request == req + }), true).Return(nil, nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, false).Return(nil) + + _, err := cm.InvokeContract(context.Background(), req, false) + + assert.NoError(t, err) + + mth.AssertExpectations(t) + mim.AssertExpectations(t) + mdi.AssertExpectations(t) + mom.AssertExpectations(t) + mbi.AssertExpectations(t) +} + +func TestInvokeContractViaFFI(t *testing.T) { + cm := newTestContractManager() + mim := cm.identity.(*identitymanagermocks.Manager) + mdi := cm.database.(*databasemocks.Plugin) + mth := cm.txHelper.(*txcommonmocks.Helper) + mom := cm.operations.(*operationmocks.Manager) + mbi := cm.blockchain.(*blockchainmocks.Plugin) + txw := cm.txWriter.(*txwritermocks.Writer) + + req := &core.ContractCallRequest{ + Type: core.CallTypeInvoke, + Interface: fftypes.NewUUID(), + Location: fftypes.JSONAnyPtr(""), + MethodPath: "doStuff", + IdempotencyKey: "idem1", + } + + method := &fftypes.FFIMethod{ + Name: "doStuff", + ID: fftypes.NewUUID(), + Params: fftypes.FFIParams{}, + Returns: fftypes.FFIParams{}, + } + errors := []*fftypes.FFIError{} + 
txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" - })).Return(nil) + })).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil) + mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(txcommon.BlockchainInvokeData) return op.Type == core.OpTypeBlockchainInvoke && data.Request == req - })).Return(nil, nil) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, false).Return(nil) + }), true).Return(nil, nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), method, errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, false).Return(nil) + + mdb := cm.database.(*databasemocks.Plugin) + mdb.On("GetFFIMethod", mock.Anything, "ns1", req.Interface, req.MethodPath).Return(method, nil) + mdb.On("GetFFIErrors", mock.Anything, "ns1", mock.Anything).Return(errors, nil, nil) _, err := cm.InvokeContract(context.Background(), req, false) @@ -1860,6 +2032,7 @@ func TestInvokeContractWithBroadcast(t *testing.T) { mom := cm.operations.(*operationmocks.Manager) mbi := cm.blockchain.(*blockchainmocks.Plugin) mbm := cm.broadcast.(*broadcastmocks.Manager) + txw := cm.txWriter.(*txwritermocks.Writer) sender := &syncasyncmocks.Sender{} req := &core.ContractCallRequest{ @@ -1885,12 +2058,13 @@ func TestInvokeContractWithBroadcast(t *testing.T) { }, } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractInvokePin, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), nil) - mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mom.On("AddOrReuseOperation", mock.Anything, mock.MatchedBy(func(op *core.Operation) bool { + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvokePin, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" - })).Return(nil) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, true).Return(nil) + })).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil) + mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, true).Return(nil) mbm.On("NewBroadcast", req.Message).Return(sender, nil) sender.On("Prepare", mock.Anything).Return(nil) sender.On("Send", mock.Anything).Return(nil) @@ -1916,6 +2090,7 @@ func TestInvokeContractWithBroadcastConfirm(t *testing.T) { mom := cm.operations.(*operationmocks.Manager) mbi := cm.blockchain.(*blockchainmocks.Plugin) mbm := cm.broadcast.(*broadcastmocks.Manager) + txw := cm.txWriter.(*txwritermocks.Writer) sender := &syncasyncmocks.Sender{} req := &core.ContractCallRequest{ @@ -1941,12 +2116,13 @@ func TestInvokeContractWithBroadcastConfirm(t *testing.T) { }, } - mth.On("SubmitNewTransaction", mock.Anything, 
core.TransactionTypeContractInvokePin, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), nil) - mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mom.On("AddOrReuseOperation", mock.Anything, mock.MatchedBy(func(op *core.Operation) bool { + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvokePin, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" - })).Return(nil) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, true).Return(nil) + })).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil) + mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, true).Return(nil) mbm.On("NewBroadcast", req.Message).Return(sender, nil) sender.On("Prepare", mock.Anything).Return(nil) sender.On("SendAndWait", mock.Anything).Return(nil) @@ -1996,7 +2172,9 @@ func TestInvokeContractWithBroadcastPrepareFail(t *testing.T) { } mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, true).Return(nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, true).Return(nil) mbm.On("NewBroadcast", req.Message).Return(sender, nil) sender.On("Prepare", mock.Anything).Return(fmt.Errorf("pop")) @@ -2204,26 +2382,40 @@ func TestInvokeContractIdempotentResubmitOperation(t *testing.T) { mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) mbm := cm.blockchain.(*blockchainmocks.Plugin) + mbrm := cm.broadcast.(*broadcastmocks.Manager) + txw := cm.txWriter.(*txwritermocks.Writer) + sender := &syncasyncmocks.Sender{} req := &core.ContractCallRequest{ Type: core.CallTypeInvoke, Interface: fftypes.NewUUID(), Location: fftypes.JSONAnyPtr(""), Method: &fftypes.FFIMethod{ - Name: "doStuff", - ID: fftypes.NewUUID(), - Params: fftypes.FFIParams{}, + Name: "doStuff", + ID: fftypes.NewUUID(), + Params: fftypes.FFIParams{ + { + Name: "data", + Schema: fftypes.JSONAnyPtr(`{"type":"string"}`), + }}, Returns: fftypes.FFIParams{}, }, + Message: &core.MessageInOut{}, IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ + mbrm.On("NewBroadcast", req.Message).Return(sender, nil) + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvokePin, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { + return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" + })).Return(nil, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(&core.Operation{}, nil) + mom.On("ResubmitOperations", 
context.Background(), id).Return(1, []*core.Operation{{}}, nil) mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mbm.On("ValidateInvokeRequest", context.Background(), req.Method, req.Input, req.Errors, false).Return(nil) + opaqueData := "anything" + mbm.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + sender.On("Prepare", mock.Anything).Return(nil) // we won't do send though + mbm.On("ValidateInvokeRequest", context.Background(), opaqueData, req.Input, true).Return(nil) // If ResubmitOperations returns an operation it's because it found one to resubmit, so we return 2xx not 409, and don't expect an error _, err := cm.InvokeContract(context.Background(), req, false) @@ -2242,6 +2434,7 @@ func TestInvokeContractIdempotentNoOperationToResubmit(t *testing.T) { mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) mbm := cm.blockchain.(*blockchainmocks.Plugin) + txw := cm.txWriter.(*txwritermocks.Writer) req := &core.ContractCallRequest{ Type: core.CallTypeInvoke, @@ -2256,12 +2449,16 @@ func TestInvokeContractIdempotentNoOperationToResubmit(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { + return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" + })).Return(nil, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, nil) + mom.On("ResubmitOperations", context.Background(), id).Return(1 /* total */, nil /* to resubmit */, nil) mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mbm.On("ValidateInvokeRequest", context.Background(), req.Method, req.Input, req.Errors, false).Return(nil) + opaqueData := "anything" + mbm.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbm.On("ValidateInvokeRequest", context.Background(), opaqueData, req.Input, false).Return(nil) // If ResubmitOperations returns nil it's because there was no operation in initialized state, so we expect the regular 409 error back _, err := cm.InvokeContract(context.Background(), req, false) @@ -2281,6 +2478,7 @@ func TestInvokeContractIdempotentErrorOnOperationResubmit(t *testing.T) { mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) mbm := cm.blockchain.(*blockchainmocks.Plugin) + txw := cm.txWriter.(*txwritermocks.Writer) req := &core.ContractCallRequest{ Type: core.CallTypeInvoke, @@ -2295,12 +2493,16 @@ func TestInvokeContractIdempotentErrorOnOperationResubmit(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), &sqlcommon.IdempotencyError{ + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { + return op.Namespace == "ns1" && op.Type == 
core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" + })).Return(nil, &sqlcommon.IdempotencyError{ ExistingTXID: id, OriginalError: i18n.NewError(context.Background(), coremsgs.MsgIdempotencyKeyDuplicateTransaction, "idem1", id)}) - mom.On("ResubmitOperations", context.Background(), id).Return(nil, fmt.Errorf("pop")) + mom.On("ResubmitOperations", context.Background(), id).Return(-1, nil, fmt.Errorf("pop")) mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mbm.On("ValidateInvokeRequest", context.Background(), req.Method, req.Input, req.Errors, false).Return(nil) + opaqueData := "anything" + mbm.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbm.On("ValidateInvokeRequest", context.Background(), opaqueData, req.Input, false).Return(nil) // If ResubmitOperations returns an error trying to resubmit an operation we expect that back, not a 409 conflict _, err := cm.InvokeContract(context.Background(), req, false) @@ -2321,6 +2523,7 @@ func TestInvokeContractConfirm(t *testing.T) { mom := cm.operations.(*operationmocks.Manager) msa := cm.syncasync.(*syncasyncmocks.Bridge) mbi := cm.blockchain.(*blockchainmocks.Plugin) + txw := cm.txWriter.(*txwritermocks.Writer) req := &core.ContractCallRequest{ Type: core.CallTypeInvoke, @@ -2335,22 +2538,23 @@ func TestInvokeContractConfirm(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), nil) - mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mom.On("AddOrReuseOperation", mock.Anything, mock.MatchedBy(func(op *core.Operation) bool { + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" - })).Return(nil) + })).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil) + mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(txcommon.BlockchainInvokeData) return op.Type == core.OpTypeBlockchainInvoke && data.Request == req - })).Return(nil, nil) + }), true).Return(nil, nil) msa.On("WaitForInvokeOperation", mock.Anything, mock.Anything, mock.Anything). Run(func(args mock.Arguments) { send := args[2].(syncasync.SendFunction) send(context.Background()) }). 
Return(&core.Operation{}, nil) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, false).Return(nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, false).Return(nil) _, err := cm.InvokeContract(context.Background(), req, true) @@ -2371,6 +2575,7 @@ func TestInvokeContractFail(t *testing.T) { mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) mbi := cm.blockchain.(*blockchainmocks.Plugin) + txw := cm.txWriter.(*txwritermocks.Writer) req := &core.ContractCallRequest{ Type: core.CallTypeInvoke, @@ -2385,16 +2590,17 @@ func TestInvokeContractFail(t *testing.T) { IdempotencyKey: "idem1", } - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), nil) - mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mom.On("AddOrReuseOperation", mock.Anything, mock.MatchedBy(func(op *core.Operation) bool { + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" - })).Return(nil) + })).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil) + mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(txcommon.BlockchainInvokeData) return op.Type == core.OpTypeBlockchainInvoke && data.Request == req - })).Return(nil, fmt.Errorf("pop")) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, false).Return(nil) + }), true).Return(nil, fmt.Errorf("pop")) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, false).Return(nil) _, err := cm.InvokeContract(context.Background(), req, false) @@ -2407,6 +2613,37 @@ func TestInvokeContractFail(t *testing.T) { mbi.AssertExpectations(t) } +func TestInvokeContractBadInput(t *testing.T) { + cm := newTestContractManager() + + req := &core.ContractCallRequest{ + Type: core.CallTypeInvoke, + Interface: fftypes.NewUUID(), + Location: fftypes.JSONAnyPtr(""), + Input: map[string]interface{}{ + "badness": map[bool]bool{false: true}, // cannot be serialized to JSON + }, + } + + _, _, err := cm.writeInvokeTransaction(context.Background(), req) + + assert.Regexp(t, "json", err) +} + +func TestDeployContractBadInput(t *testing.T) { + cm := newTestContractManager() + + req := &core.ContractDeployRequest{ + Options: map[string]interface{}{ + "badness": map[bool]bool{false: true}, // cannot be serialized to JSON + }, + } + + _, _, err := cm.writeDeployTransaction(context.Background(), req) + + assert.Regexp(t, "json", err) +} + func TestInvokeContractFailResolveInputSigningKey(t *testing.T) { cm := newTestContractManager() mim := cm.identity.(*identitymanagermocks.Manager) @@ -2448,6 +2685,7 @@ func TestInvokeContractTXFail(t *testing.T) { mim := cm.identity.(*identitymanagermocks.Manager) mth := cm.txHelper.(*txcommonmocks.Helper) mbi := 
cm.blockchain.(*blockchainmocks.Plugin) + txw := cm.txWriter.(*txwritermocks.Writer) req := &core.ContractCallRequest{ Type: core.CallTypeInvoke, @@ -2463,8 +2701,12 @@ func TestInvokeContractTXFail(t *testing.T) { } mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1")).Return(nil, fmt.Errorf("pop")) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, false).Return(nil) + txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { + return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" + })).Return(nil, fmt.Errorf("pop")) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, false).Return(nil) _, err := cm.InvokeContract(context.Background(), req, false) @@ -2571,8 +2813,10 @@ func TestQueryContract(t *testing.T) { } mim.On("ResolveQuerySigningKey", mock.Anything, "key-unresolved", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) - mbi.On("QueryContract", mock.Anything, "key-resolved", req.Location, req.Method, req.Input, req.Errors, req.Options).Return(struct{}{}, nil) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, false).Return(nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, false).Return(nil) + mbi.On("QueryContract", mock.Anything, "key-resolved", req.Location, opaqueData, req.Input, req.Options).Return(struct{}{}, nil) _, err := cm.InvokeContract(context.Background(), req, false) @@ -2604,7 +2848,9 @@ func TestCallContractInvalidType(t *testing.T) { mom.On("AddOrReuseOperation", mock.Anything, mock.MatchedBy(func(op *core.Operation) bool { return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" })).Return(nil) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, false).Return(nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, false).Return(nil) assert.PanicsWithValue(t, "unknown call type: ", func() { cm.InvokeContract(context.Background(), req, false) @@ -2866,6 +3112,7 @@ func TestInvokeContractAPI(t *testing.T) { mth := cm.txHelper.(*txcommonmocks.Helper) mom := cm.operations.(*operationmocks.Manager) mbi := cm.blockchain.(*blockchainmocks.Plugin) + txw := cm.txWriter.(*txwritermocks.Writer) req := &core.ContractCallRequest{ Type: core.CallTypeInvoke, @@ -2887,15 +3134,16 @@ func TestInvokeContractAPI(t *testing.T) { mim.On("ResolveInputSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) mdb.On("GetContractAPIByName", mock.Anything, "ns1", "banana").Return(api, nil) - mth.On("SubmitNewTransaction", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1")).Return(fftypes.NewUUID(), nil) - mom.On("AddOrReuseOperation", mock.Anything, mock.MatchedBy(func(op *core.Operation) bool { 
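// Editorial note (not part of the diff): the test changes around here stub a revised invoke
// path in which the blockchain connector first parses the FFI method and error definitions
// into an opaque, connector-specific handle ("ParseInterface"), and that handle is then passed
// to ValidateInvokeRequest / InvokeContract / QueryContract in place of the raw
// *fftypes.FFIMethod, while transaction + operation records are written together via the new
// txwriter ("WriteTransactionAndOps") rather than SubmitNewTransaction/AddOrReuseOperation.
// The sketch below is a minimal, self-contained illustration of the parse-once pattern using
// simplified, hypothetical types (connector, parsedMethod) -- it is not the FireFly interface itself.

package main

import (
	"context"
	"errors"
	"fmt"
)

// connector is a stand-in for a blockchain plugin that pre-parses a method
// definition once, then reuses the opaque result for validation and submission.
type connector struct{}

type parsedMethod struct{ name string } // opaque to the caller

func (c *connector) ParseInterface(ctx context.Context, methodName string) (*parsedMethod, error) {
	if methodName == "" {
		return nil, errors.New("missing method")
	}
	return &parsedMethod{name: methodName}, nil
}

func (c *connector) ValidateInvokeRequest(ctx context.Context, pm *parsedMethod, input map[string]interface{}) error {
	if len(input) == 0 {
		return fmt.Errorf("no input for %s", pm.name)
	}
	return nil
}

func (c *connector) InvokeContract(ctx context.Context, pm *parsedMethod, input map[string]interface{}) (submissionRejected bool, err error) {
	// err == nil means submitted; when err != nil, submissionRejected distinguishes
	// a permanent rejection from a retryable failure.
	return false, nil
}

func main() {
	ctx := context.Background()
	c := &connector{}
	input := map[string]interface{}{"value": "1"}
	pm, err := c.ParseInterface(ctx, "set") // parse once...
	if err == nil {
		err = c.ValidateInvokeRequest(ctx, pm, input) // ...validate with the same handle...
	}
	if err == nil {
		_, err = c.InvokeContract(ctx, pm, input) // ...then submit with the same handle
	}
	fmt.Println("invoke error:", err)
}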
+ txw.On("WriteTransactionAndOps", mock.Anything, core.TransactionTypeContractInvoke, core.IdempotencyKey("idem1"), mock.MatchedBy(func(op *core.Operation) bool { return op.Namespace == "ns1" && op.Type == core.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" - })).Return(nil) + })).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(txcommon.BlockchainInvokeData) return op.Type == core.OpTypeBlockchainInvoke && data.Request == req - })).Return(nil, nil) - mbi.On("ValidateInvokeRequest", mock.Anything, req.Method, req.Input, req.Errors, false).Return(nil) + }), true).Return(nil, nil) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), req.Method, req.Errors).Return(opaqueData, nil) + mbi.On("ValidateInvokeRequest", mock.Anything, opaqueData, req.Input, false).Return(nil) _, err := cm.InvokeContractAPI(context.Background(), "banana", "peel", req, false) @@ -2961,11 +3209,11 @@ func TestGetContractAPI(t *testing.T) { } mdb.On("GetContractAPIByName", mock.Anything, "ns1", "banana").Return(api, nil) - result, err := cm.GetContractAPI(context.Background(), "http://localhost/api", "banana") + result, err := cm.GetContractAPI(context.Background(), "http://localhost/api/v1/namespaces/ns1", "banana") assert.NoError(t, err) - assert.Equal(t, "http://localhost/api/namespaces/ns1/apis/banana/api/swagger.json", result.URLs.OpenAPI) - assert.Equal(t, "http://localhost/api/namespaces/ns1/apis/banana/api", result.URLs.UI) + assert.Equal(t, "http://localhost/api/v1/namespaces/ns1/apis/banana/api/swagger.json", result.URLs.OpenAPI) + assert.Equal(t, "http://localhost/api/v1/namespaces/ns1/apis/banana/api", result.URLs.UI) } func TestGetContractAPIs(t *testing.T) { @@ -2981,12 +3229,12 @@ func TestGetContractAPIs(t *testing.T) { filter := database.ContractAPIQueryFactory.NewFilter(context.Background()).And() mdb.On("GetContractAPIs", mock.Anything, "ns1", filter).Return(apis, &ffapi.FilterResult{}, nil) - results, _, err := cm.GetContractAPIs(context.Background(), "http://localhost/api", filter) + results, _, err := cm.GetContractAPIs(context.Background(), "http://localhost/api/v1/namespaces/ns1", filter) assert.NoError(t, err) assert.Equal(t, 1, len(results)) - assert.Equal(t, "http://localhost/api/namespaces/ns1/apis/banana/api/swagger.json", results[0].URLs.OpenAPI) - assert.Equal(t, "http://localhost/api/namespaces/ns1/apis/banana/api", results[0].URLs.UI) + assert.Equal(t, "http://localhost/api/v1/namespaces/ns1/apis/banana/api/swagger.json", results[0].URLs.OpenAPI) + assert.Equal(t, "http://localhost/api/v1/namespaces/ns1/apis/banana/api", results[0].URLs.UI) } func TestGetContractAPIInterface(t *testing.T) { @@ -3067,6 +3315,22 @@ func TestResolveContractAPI(t *testing.T) { mdb.AssertExpectations(t) } +func TestResolveContractAPIValidateFail(t *testing.T) { + cm := newTestContractManager() + + api := &core.ContractAPI{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "BAD***BAD", + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + err := cm.ResolveContractAPI(context.Background(), "http://localhost/api", api) + assert.Regexp(t, "FF00140", err) +} + func TestResolveContractAPIBadLocation(t *testing.T) { cm := newTestContractManager() mbi := cm.blockchain.(*blockchainmocks.Plugin) @@ -3311,30 +3575,10 @@ func TestValidateFFIParamBadSchemaJSON(t *testing.T) { Name: "x", Schema: fftypes.JSONAnyPtr(`{"type": "integer"`), } - err := 
cm.validateFFIParam(context.Background(), param) + _, _, err := cm.validateFFIParam(context.Background(), param) assert.Regexp(t, "unexpected EOF", err) } -func TestCheckParamSchemaBadSchema(t *testing.T) { - cm := newTestContractManager() - param := &fftypes.FFIParam{ - Name: "x", - Schema: fftypes.JSONAnyPtr(`{"type": "integer"`), - } - err := cm.checkParamSchema(context.Background(), 1, param) - assert.Regexp(t, "unexpected EOF", err) -} - -func TestCheckParamSchemaCompileFail(t *testing.T) { - cm := newTestContractManager() - param := &fftypes.FFIParam{ - Name: "x", - Schema: fftypes.JSONAnyPtr(``), - } - err := cm.checkParamSchema(context.Background(), 1, param) - assert.Regexp(t, "compilation failed", err) -} - func TestAddJSONSchemaExtension(t *testing.T) { cm := &contractManager{ database: &databasemocks.Plugin{}, @@ -3349,13 +3593,32 @@ func TestAddJSONSchemaExtension(t *testing.T) { func TestGenerateFFI(t *testing.T) { cm := newTestContractManager() mbi := cm.blockchain.(*blockchainmocks.Plugin) - mbi.On("GenerateFFI", mock.Anything, mock.Anything).Return(&fftypes.FFI{ - Name: "generated", - }, nil) + gfi := mbi.On("GenerateFFI", mock.Anything, mock.Anything) + gfi.Run(func(args mock.Arguments) { + gf := args[1].(*fftypes.FFIGenerationRequest) + gfi.Return(&fftypes.FFI{ + Name: gf.Name, + Version: gf.Version, + Methods: []*fftypes.FFIMethod{ + { + Name: "method1", + }, + { + Name: "method1", + }, + }, + }, nil) + }) + ffi, err := cm.GenerateFFI(context.Background(), &fftypes.FFIGenerationRequest{}) assert.NoError(t, err) assert.NotNil(t, ffi) assert.Equal(t, "generated", ffi.Name) + assert.Equal(t, "0.0.1", ffi.Version) + assert.Equal(t, "method1", ffi.Methods[0].Name) + assert.Equal(t, "method1", ffi.Methods[0].Pathname) + assert.Equal(t, "method1", ffi.Methods[1].Name) + assert.Equal(t, "method1_1", ffi.Methods[1].Pathname) } type MockFFIParamValidator struct{} @@ -3383,3 +3646,209 @@ func TestBuildInvokeMessageInvalidType(t *testing.T) { }) assert.Regexp(t, "FF10287", err) } + +func TestDeleteFFI(t *testing.T) { + cm := newTestContractManager() + + id := fftypes.NewUUID() + + mdi := cm.database.(*databasemocks.Plugin) + mdi.On("GetFFIByID", context.Background(), "ns1", id).Return(&fftypes.FFI{}, nil) + mdi.On("DeleteFFI", context.Background(), "ns1", id).Return(nil) + + err := cm.DeleteFFI(context.Background(), id) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestDeleteFFINotFound(t *testing.T) { + cm := newTestContractManager() + + id := fftypes.NewUUID() + + mdi := cm.database.(*databasemocks.Plugin) + mdi.On("GetFFIByID", context.Background(), "ns1", id).Return(nil, nil) + + err := cm.DeleteFFI(context.Background(), id) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestDeleteFFIFailGet(t *testing.T) { + cm := newTestContractManager() + + id := fftypes.NewUUID() + + mdi := cm.database.(*databasemocks.Plugin) + mdi.On("GetFFIByID", context.Background(), "ns1", id).Return(nil, fmt.Errorf("pop")) + + err := cm.DeleteFFI(context.Background(), id) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestDeleteFFIPublished(t *testing.T) { + cm := newTestContractManager() + + id := fftypes.NewUUID() + + mdi := cm.database.(*databasemocks.Plugin) + mdi.On("GetFFIByID", context.Background(), "ns1", id).Return(&fftypes.FFI{Published: true}, nil) + + err := cm.DeleteFFI(context.Background(), id) + assert.Regexp(t, "FF10449", err) + + mdi.AssertExpectations(t) +} + +func TestDeleteContractAPI(t *testing.T) { + cm 
:= newTestContractManager() + + id := fftypes.NewUUID() + + mdi := cm.database.(*databasemocks.Plugin) + mdi.On("GetContractAPIByName", context.Background(), "ns1", "banana").Return(&core.ContractAPI{ID: id}, nil) + mdi.On("DeleteContractAPI", context.Background(), "ns1", id).Return(nil) + + err := cm.DeleteContractAPI(context.Background(), "banana") + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestDeleteContractAPIFailGet(t *testing.T) { + cm := newTestContractManager() + + mdi := cm.database.(*databasemocks.Plugin) + mdi.On("GetContractAPIByName", context.Background(), "ns1", "banana").Return(nil, fmt.Errorf("pop")) + + err := cm.DeleteContractAPI(context.Background(), "banana") + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestDeleteContractAPINotFound(t *testing.T) { + cm := newTestContractManager() + + mdi := cm.database.(*databasemocks.Plugin) + mdi.On("GetContractAPIByName", context.Background(), "ns1", "banana").Return(nil, nil) + + err := cm.DeleteContractAPI(context.Background(), "banana") + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestDeleteContractAPIPublished(t *testing.T) { + cm := newTestContractManager() + + mdi := cm.database.(*databasemocks.Plugin) + mdi.On("GetContractAPIByName", context.Background(), "ns1", "banana").Return(&core.ContractAPI{Published: true}, nil) + + err := cm.DeleteContractAPI(context.Background(), "banana") + assert.Regexp(t, "FF10449", err) + + mdi.AssertExpectations(t) +} + +func TestResolveInvokeContractRequestCache(t *testing.T) { + cm := newTestContractManager() + mim := cm.identity.(*identitymanagermocks.Manager) + mdi := cm.database.(*databasemocks.Plugin) + mth := cm.txHelper.(*txcommonmocks.Helper) + mom := cm.operations.(*operationmocks.Manager) + mbi := cm.blockchain.(*blockchainmocks.Plugin) + + method := &fftypes.FFIMethod{ + Name: "doStuff", + ID: fftypes.NewUUID(), + Params: fftypes.FFIParams{}, + Returns: fftypes.FFIParams{}, + } + errors := []*fftypes.FFIError{} + + mdb := cm.database.(*databasemocks.Plugin) + mdb.On("GetFFIMethod", mock.Anything, "ns1", mock.Anything, mock.Anything).Return(method, nil).Once() + mdb.On("GetFFIErrors", mock.Anything, "ns1", mock.Anything).Return(errors, nil, nil).Once() + + interfaceID := fftypes.NewUUID() + err := cm.resolveInvokeContractRequest(context.Background(), &core.ContractCallRequest{ + Type: core.CallTypeInvoke, + Interface: interfaceID, + Location: fftypes.JSONAnyPtr("location1"), + MethodPath: "doStuff", + IdempotencyKey: "idem1", + }) + assert.NoError(t, err) + + // Test with Once() that the second is cached + err = cm.resolveInvokeContractRequest(context.Background(), &core.ContractCallRequest{ + Type: core.CallTypeInvoke, + Interface: interfaceID, + Location: fftypes.JSONAnyPtr("location2"), + MethodPath: "doStuff", + IdempotencyKey: "idem2", + }) + assert.NoError(t, err) + + mth.AssertExpectations(t) + mim.AssertExpectations(t) + mdi.AssertExpectations(t) + mom.AssertExpectations(t) + mbi.AssertExpectations(t) +} + +func TestEnsureParamNamesIncludedInCacheKeys(t *testing.T) { + method1 := `{ + "name": "myFunction", + "params": [ + { + "name": "param_name_1", + "schema": { + "details": { + "internalType": "address", + "type": "address" + }, + "type": "string" + } + } + ], + "returns": [] + }` + var method1FFI *fftypes.FFIMethod + err := json.Unmarshal([]byte(method1), &method1FFI) + assert.NoError(t, err) + + method2 := `{ + "name": "myFunction", + "params": [ + { + "name": "param_name_2", + "schema": { 
+ "details": { + "internalType": "address", + "type": "address" + }, + "type": "string" + } + } + ], + "returns": [] + }` + var method2FFI *fftypes.FFIMethod + err = json.Unmarshal([]byte(method2), &method2FFI) + assert.NoError(t, err) + + cm := newTestContractManager() + paramUniqueHash1, _, err := cm.validateFFIMethod(context.Background(), method1FFI) + assert.NoError(t, err) + paramUniqueHash2, _, err := cm.validateFFIMethod(context.Background(), method2FFI) + assert.NoError(t, err) + + assert.NotEqual(t, hex.EncodeToString(paramUniqueHash1.Sum(nil)), hex.EncodeToString(paramUniqueHash2.Sum(nil))) + +} diff --git a/internal/contracts/operations.go b/internal/contracts/operations.go index 882d85114c..c308aa51ad 100644 --- a/internal/contracts/operations.go +++ b/internal/contracts/operations.go @@ -22,6 +22,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" + "github.com/hyperledger/firefly-common/pkg/log" "github.com/hyperledger/firefly/internal/batch" "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/internal/data" @@ -112,7 +113,7 @@ func (cm *contractManager) PrepareOperation(ctx context.Context, op *core.Operat } } -func (cm *contractManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { +func (cm *contractManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { switch data := op.Data.(type) { case txcommon.BlockchainInvokeData: req := data.Request @@ -126,14 +127,30 @@ func (cm *contractManager) RunOperation(ctx context.Context, op *core.PreparedOp Contexts: data.BatchPin.Contexts, } } - return nil, false, cm.blockchain.InvokeContract(ctx, op.NamespacedIDString(), req.Key, req.Location, req.Method, req.Input, req.Errors, req.Options, batchPin) - + bcParsedMethod, err := cm.validateInvokeContractRequest(ctx, req, false /* do-not revalidate with the blockchain connector - just send it */) + if err != nil { + return nil, core.OpPhaseInitializing, err + } + submissionRejected, err := cm.blockchain.InvokeContract(ctx, op.NamespacedIDString(), req.Key, req.Location, bcParsedMethod, req.Input, req.Options, batchPin) + return nil, submissionPhase(ctx, submissionRejected, err), err case blockchainContractDeployData: req := data.Request - return nil, false, cm.blockchain.DeployContract(ctx, op.NamespacedIDString(), req.Key, req.Definition, req.Contract, req.Input, req.Options) + submissionRejected, err := cm.blockchain.DeployContract(ctx, op.NamespacedIDString(), req.Key, req.Definition, req.Contract, req.Input, req.Options) + return nil, submissionPhase(ctx, submissionRejected, err), err default: - return nil, false, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) + return nil, core.OpPhaseInitializing, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) + } +} + +func submissionPhase(ctx context.Context, submissionRejected bool, err error) core.OpPhase { + if err == nil { + return core.OpPhasePending + } + log.L(ctx).Errorf("Transaction submission failed [submissionRejected=%t]: %s", submissionRejected, err) + if submissionRejected { + return core.OpPhaseComplete } + return core.OpPhaseInitializing } func (cm *contractManager) OnOperationUpdate(ctx context.Context, op *core.Operation, update *core.OperationUpdate) error { diff --git a/internal/contracts/operations_test.go b/internal/contracts/operations_test.go index 
e6aebc28ce..bd425dabc1 100644 --- a/internal/contracts/operations_test.go +++ b/internal/contracts/operations_test.go @@ -41,6 +41,9 @@ func reqWithMessage(msgType core.MessageType) *core.ContractCallRequest { Location: fftypes.JSONAnyPtr(`{"address":"0x1111"}`), Method: &fftypes.FFIMethod{ Name: "set", + Params: fftypes.FFIParams{ + {Name: "data", Schema: fftypes.JSONAnyPtr(`{"type":"string"}`)}, + }, }, Input: map[string]interface{}{ "value": "1", @@ -78,21 +81,147 @@ func TestPrepareAndRunBlockchainInvoke(t *testing.T) { assert.NoError(t, err) mbi := cm.blockchain.(*blockchainmocks.Plugin) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), mock.MatchedBy(func(method *fftypes.FFIMethod) bool { + return method.Name == req.Method.Name + }), req.Errors).Return(opaqueData, nil) mbi.On("InvokeContract", context.Background(), "ns1:"+op.ID.String(), "0x123", mock.MatchedBy(func(loc *fftypes.JSONAny) bool { return loc.String() == req.Location.String() - }), mock.MatchedBy(func(method *fftypes.FFIMethod) bool { + }), opaqueData, req.Input, req.Options, (*blockchain.BatchPin)(nil)).Return(false, nil) + + po, err := cm.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, req, po.Data.(txcommon.BlockchainInvokeData).Request) + + _, phase, err := cm.RunOperation(context.Background(), po) + + assert.Equal(t, core.OpPhasePending, phase) + assert.NoError(t, err) + + mbi.AssertExpectations(t) +} + +func TestPrepareAndRunBlockchainInvokeRejected(t *testing.T) { + cm := newTestContractManager() + + op := &core.Operation{ + Type: core.OpTypeBlockchainInvoke, + ID: fftypes.NewUUID(), + Namespace: "ns1", + } + req := &core.ContractCallRequest{ + Key: "0x123", + Location: fftypes.JSONAnyPtr(`{"address":"0x1111"}`), + Method: &fftypes.FFIMethod{ + Name: "set", + }, + Input: map[string]interface{}{ + "value": "1", + }, + } + err := addBlockchainReqInputs(op, req) + assert.NoError(t, err) + + mbi := cm.blockchain.(*blockchainmocks.Plugin) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), mock.MatchedBy(func(method *fftypes.FFIMethod) bool { return method.Name == req.Method.Name - }), req.Input, req.Errors, req.Options, (*blockchain.BatchPin)(nil)).Return(nil) + }), req.Errors).Return(opaqueData, nil) + mbi.On("InvokeContract", context.Background(), "ns1:"+op.ID.String(), "0x123", mock.MatchedBy(func(loc *fftypes.JSONAny) bool { + return loc.String() == req.Location.String() + }), opaqueData, req.Input, req.Options, (*blockchain.BatchPin)(nil)). 
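// Editorial note (not part of the diff): the operations tests in this file exercise the new
// RunOperation contract, which reports a core.OpPhase instead of a boolean "complete" flag.
// The submissionPhase helper added in operations.go maps the (submissionRejected, err) pair
// returned by the connector as follows:
//   err == nil                       -> OpPhasePending       (submitted, awaiting confirmation)
//   err != nil && submissionRejected -> OpPhaseComplete      (permanent rejection, no retry)
//   err != nil && !submissionRejected-> OpPhaseInitializing  (retryable failure)
// The sketch below is a self-contained restatement of that mapping using local, hypothetical
// phase constants -- it is not the FireFly core package.

package main

import (
	"errors"
	"fmt"
)

type opPhase string

const (
	phaseInitializing opPhase = "initializing"
	phasePending      opPhase = "pending"
	phaseComplete     opPhase = "complete"
)

// phaseForSubmission mirrors the mapping applied by submissionPhase.
func phaseForSubmission(submissionRejected bool, err error) opPhase {
	if err == nil {
		return phasePending
	}
	if submissionRejected {
		return phaseComplete
	}
	return phaseInitializing
}

func main() {
	cases := []struct {
		rejected bool
		err      error
	}{
		{false, nil},                       // successful submission
		{true, errors.New("rejected")},     // permanent rejection by the connector
		{false, errors.New("unreachable")}, // transient failure, eligible for retry
	}
	for _, c := range cases {
		fmt.Printf("rejected=%t err=%v -> %s\n", c.rejected, c.err, phaseForSubmission(c.rejected, c.err))
	}
}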
+ Return(true, fmt.Errorf("rejected")) po, err := cm.PrepareOperation(context.Background(), op) assert.NoError(t, err) assert.Equal(t, req, po.Data.(txcommon.BlockchainInvokeData).Request) - _, complete, err := cm.RunOperation(context.Background(), po) + _, phase, err := cm.RunOperation(context.Background(), po) + + assert.Equal(t, core.OpPhaseComplete, phase) + assert.Regexp(t, "rejected", err) + + mbi.AssertExpectations(t) +} + +func TestPrepareAndRunBlockchainInvokeRetryable(t *testing.T) { + cm := newTestContractManager() - assert.False(t, complete) + op := &core.Operation{ + Type: core.OpTypeBlockchainInvoke, + ID: fftypes.NewUUID(), + Namespace: "ns1", + } + req := &core.ContractCallRequest{ + Key: "0x123", + Location: fftypes.JSONAnyPtr(`{"address":"0x1111"}`), + Method: &fftypes.FFIMethod{ + Name: "set", + }, + Input: map[string]interface{}{ + "value": "1", + }, + } + err := addBlockchainReqInputs(op, req) assert.NoError(t, err) + mbi := cm.blockchain.(*blockchainmocks.Plugin) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), mock.MatchedBy(func(method *fftypes.FFIMethod) bool { + return method.Name == req.Method.Name + }), req.Errors).Return(opaqueData, nil) + mbi.On("InvokeContract", context.Background(), "ns1:"+op.ID.String(), "0x123", mock.MatchedBy(func(loc *fftypes.JSONAny) bool { + return loc.String() == req.Location.String() + }), opaqueData, req.Input, req.Options, (*blockchain.BatchPin)(nil)). + Return(false, fmt.Errorf("rejected")) + + po, err := cm.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, req, po.Data.(txcommon.BlockchainInvokeData).Request) + + _, phase, err := cm.RunOperation(context.Background(), po) + + assert.Equal(t, core.OpPhaseInitializing, phase) + assert.Regexp(t, "rejected", err) + + mbi.AssertExpectations(t) +} + +func TestPrepareAndRunBlockchainInvokeValidateFail(t *testing.T) { + cm := newTestContractManager() + + op := &core.Operation{ + Type: core.OpTypeBlockchainInvoke, + ID: fftypes.NewUUID(), + Namespace: "ns1", + } + req := &core.ContractCallRequest{ + Key: "0x123", + Location: fftypes.JSONAnyPtr(`{"address":"0x1111"}`), + Method: &fftypes.FFIMethod{ + Name: "set", + }, + Input: map[string]interface{}{ + "value": "1", + }, + } + err := addBlockchainReqInputs(op, req) + assert.NoError(t, err) + + mbi := cm.blockchain.(*blockchainmocks.Plugin) + mbi.On("ParseInterface", context.Background(), mock.MatchedBy(func(method *fftypes.FFIMethod) bool { + return method.Name == req.Method.Name + }), req.Errors).Return(nil, fmt.Errorf("pop")) + + po, err := cm.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, req, po.Data.(txcommon.BlockchainInvokeData).Request) + + _, phase, err := cm.RunOperation(context.Background(), po) + + assert.Equal(t, core.OpPhaseInitializing, phase) + assert.Regexp(t, "pop", err) + mbi.AssertExpectations(t) } @@ -115,16 +244,50 @@ func TestPrepareAndRunBlockchainContractDeploy(t *testing.T) { assert.NoError(t, err) mbi := cm.blockchain.(*blockchainmocks.Plugin) - mbi.On("DeployContract", context.Background(), "ns1:"+op.ID.String(), signingKey, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mbi.On("DeployContract", context.Background(), "ns1:"+op.ID.String(), signingKey, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(false, nil) po, err := cm.PrepareOperation(context.Background(), op) assert.NoError(t, err) assert.Equal(t, req, po.Data.(blockchainContractDeployData).Request) - _, 
complete, err := cm.RunOperation(context.Background(), po) + _, phase, err := cm.RunOperation(context.Background(), po) + + assert.Equal(t, core.OpPhasePending, phase) + assert.NoError(t, err) + + mbi.AssertExpectations(t) +} + +func TestPrepareAndRunBlockchainContractDeployRejected(t *testing.T) { + cm := newTestContractManager() + + op := &core.Operation{ + Type: core.OpTypeBlockchainContractDeploy, + ID: fftypes.NewUUID(), + Namespace: "ns1", + } + signingKey := "0x2468" + req := &core.ContractDeployRequest{ + Key: signingKey, + Definition: fftypes.JSONAnyPtr("[]"), + Contract: fftypes.JSONAnyPtr("\"0x123456\""), + Input: []interface{}{"one", "two", "three"}, + } + err := addBlockchainReqInputs(op, req) + assert.NoError(t, err) + + mbi := cm.blockchain.(*blockchainmocks.Plugin) + mbi.On("DeployContract", context.Background(), "ns1:"+op.ID.String(), signingKey, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(true, fmt.Errorf("rejected")) - assert.False(t, complete) + po, err := cm.PrepareOperation(context.Background(), op) assert.NoError(t, err) + assert.Equal(t, req, po.Data.(blockchainContractDeployData).Request) + + _, phase, err := cm.RunOperation(context.Background(), po) + + assert.Equal(t, core.OpPhaseComplete, phase) + assert.Regexp(t, "rejected", err) mbi.AssertExpectations(t) } @@ -532,27 +695,29 @@ func TestRunBlockchainInvokeWithBatch(t *testing.T) { pin := fftypes.NewRandB32() mbi := cm.blockchain.(*blockchainmocks.Plugin) + opaqueData := "anything" + mbi.On("ParseInterface", context.Background(), mock.MatchedBy(func(method *fftypes.FFIMethod) bool { + return method.Name == req.Method.Name + }), req.Errors).Return(opaqueData, nil) mbi.On("InvokeContract", context.Background(), "ns1:"+op.ID.String(), "0x123", mock.MatchedBy(func(loc *fftypes.JSONAny) bool { return loc.String() == req.Location.String() - }), mock.MatchedBy(func(method *fftypes.FFIMethod) bool { - return method.Name == req.Method.Name - }), req.Input, req.Errors, req.Options, mock.MatchedBy(func(batchPin *blockchain.BatchPin) bool { + }), opaqueData, req.Input, req.Options, mock.MatchedBy(func(batchPin *blockchain.BatchPin) bool { assert.Equal(t, storedBatch.ID, batchPin.BatchID) assert.Equal(t, storedBatch.Hash, batchPin.BatchHash) assert.Equal(t, storedBatch.TX.ID, batchPin.TransactionID) assert.Equal(t, []*fftypes.Bytes32{pin}, batchPin.Contexts) assert.Equal(t, "test-payload", batchPin.BatchPayloadRef) return true - })).Return(nil) + })).Return(false, nil) po := txcommon.OpBlockchainInvoke(op, req, &txcommon.BatchPinData{ Batch: storedBatch, Contexts: []*fftypes.Bytes32{pin}, PayloadRef: "test-payload", }) - _, complete, err := cm.RunOperation(context.Background(), po) + _, phase, err := cm.RunOperation(context.Background(), po) - assert.False(t, complete) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) mbi.AssertExpectations(t) @@ -561,9 +726,9 @@ func TestRunBlockchainInvokeWithBatch(t *testing.T) { func TestRunOperationNotSupported(t *testing.T) { cm := newTestContractManager() - _, complete, err := cm.RunOperation(context.Background(), &core.PreparedOperation{}) + _, phase, err := cm.RunOperation(context.Background(), &core.PreparedOperation{}) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10378", err) } diff --git a/internal/coreconfig/coreconfig.go b/internal/coreconfig/coreconfig.go index e5cc6ff273..1a8e904ece 100644 --- a/internal/coreconfig/coreconfig.go +++ b/internal/coreconfig/coreconfig.go @@ -37,6 
+37,12 @@ const ( NamespaceDescription = "description" // NamespacePlugins is the list of namespace plugins NamespacePlugins = "plugins" + // NamespaceTLSConfigName is the user-supplied name for the TLS Config + NamespaceTLSConfigName = "name" + // NamespaceTLSConfigs is the list of tls configs + NamespaceTLSConfigs = "tlsConfigs" + // NamespaceTLSConfigTLSSection is the section to provide the paths to CA , cert and key files + NamespaceTLSConfigTLSSection = "tls" // NamespaceDefaultKey is the default signing key for blockchain transactions within this namespace NamespaceDefaultKey = "defaultKey" // NamespaceAssetKeyNormalization mechanism to normalize keys before using them. Valid options: "blockchain_plugin" - use blockchain plugin (default), "none" - do not attempt normalization @@ -80,6 +86,8 @@ var ( APIRequestTimeout = ffc("api.requestTimeout") // APIRequestMaxTimeout is the maximum timeout an application can set using a Request-Timeout header APIRequestMaxTimeout = ffc("api.requestMaxTimeout") + // APIDynamicPublicURLHeader is a header that can be used on requests to generate Swagger to influence the PublicURL on a per-request basis + APIDynamicPublicURLHeader = ffc("api.dynamicPublicURLHeader") // APIOASPanicOnMissingDescription controls whether the OpenAPI Spec generator will strongly enforce descriptions on every field or not APIOASPanicOnMissingDescription = ffc("api.oas.panicOnMissingDescription") // APIPassThroughHeaders is a list of HTTP request headers to pass through to requests made to dependency microservices @@ -154,6 +162,11 @@ var ( // DataManager Message cache config CacheMessageSize = ffc("cache.message.size") CacheMessageTTL = ffc("cache.message.ttl") + + // Token pool cache config + CacheTokenPoolTTL = ffc("cache.tokenpool.ttl") + CacheTokenPoolLimit = ffc("cache.tokenpool.limit") + // DataManager Validator cache config CacheValidatorSize = ffc("cache.validator.size") CacheValidatorTTL = ffc("cache.validator.ttl") @@ -166,6 +179,10 @@ var ( CacheOperationsLimit = ffc("cache.operations.limit") CacheOperationsTTL = ffc("cache.operations.ttl") + // Invoke methods cache config + CacheMethodsLimit = ffc("cache.methods.limit") + CacheMethodsTTL = ffc("cache.methods.ttl") + // DownloadWorkerCount is the number of download workers created to pull data from shared storage to the local DX DownloadWorkerCount = ffc("download.worker.count") // DownloadWorkerQueueLength is the length of the work queue in the channel to the workers - defaults to 2x the worker count @@ -320,6 +337,12 @@ var ( SubscriptionsRetryMaxDelay = ffc("subscription.retry.maxDelay") // SubscriptionsRetryFactor the backoff factor to use for retry of database operations SubscriptionsRetryFactor = ffc("subscription.retry.factor") + // TransactionWriterCount + TransactionWriterCount = ffc("transaction.writer.count") + // TransactionWriterBatchTimeout + TransactionWriterBatchTimeout = ffc("transaction.writer.batchTimeout") + // TransactionWriterBatchMaxTransactions + TransactionWriterBatchMaxTransactions = ffc("transaction.writer.batchMaxTransactions") // AssetManagerKeyNormalization mechanism to normalize keys before using them. 
Valid options: "blockchain_plugin" - use blockchain plugin (default), "none" - do not attempt normalization AssetManagerKeyNormalization = ffc("asset.manager.keyNormalization") @@ -356,7 +379,7 @@ func setDefaults() { viper.SetDefault(string(BlobReceiverWorkerBatchTimeout), "50ms") viper.SetDefault(string(BlobReceiverWorkerCount), 5) viper.SetDefault(string(BlobReceiverWorkerBatchMaxInserts), 200) - viper.SetDefault(string(CacheBlockchainEventLimit), 100) + viper.SetDefault(string(CacheBlockchainEventLimit), 1000) viper.SetDefault(string(CacheBlockchainEventTTL), "5m") viper.SetDefault(string(BroadcastBatchAgentTimeout), "2m") viper.SetDefault(string(BroadcastBatchSize), 200) @@ -367,8 +390,10 @@ func setDefaults() { viper.SetDefault(string(CacheAddressResolverLimit), 1000) viper.SetDefault(string(CacheAddressResolverTTL), "24h") viper.SetDefault(string(CacheEnabled), true) - viper.SetDefault(string(CacheOperationsLimit), 200) + viper.SetDefault(string(CacheOperationsLimit), 1000) viper.SetDefault(string(CacheOperationsTTL), "5m") + viper.SetDefault(string(CacheMethodsLimit), 200) + viper.SetDefault(string(CacheMethodsTTL), "5m") viper.SetDefault(string(HistogramsMaxChartRows), 100) viper.SetDefault(string(DebugPort), -1) viper.SetDefault(string(DebugAddress), "localhost") @@ -431,6 +456,9 @@ func setDefaults() { viper.SetDefault(string(SubscriptionsRetryInitialDelay), "250ms") viper.SetDefault(string(SubscriptionsRetryMaxDelay), "30s") viper.SetDefault(string(SubscriptionsRetryFactor), 2.0) + viper.SetDefault(string(TransactionWriterBatchMaxTransactions), 100) + viper.SetDefault(string(TransactionWriterBatchTimeout), "10ms") + viper.SetDefault(string(TransactionWriterCount), 5) viper.SetDefault(string(CacheTransactionSize), "1Mb") viper.SetDefault(string(CacheTransactionTTL), "5m") viper.SetDefault(string(UIEnabled), true) @@ -438,6 +466,8 @@ func setDefaults() { viper.SetDefault(string(CacheValidatorTTL), "1h") viper.SetDefault(string(CacheIdentityLimit), 100) viper.SetDefault(string(CacheIdentityTTL), "1h") + viper.SetDefault(string(CacheTokenPoolLimit), 100) + viper.SetDefault(string(CacheTokenPoolTTL), "1h") } func Reset() { diff --git a/internal/coremsgs/en_api_translations.go b/internal/coremsgs/en_api_translations.go index 74dfe470a7..677d768f60 100644 --- a/internal/coremsgs/en_api_translations.go +++ b/internal/coremsgs/en_api_translations.go @@ -76,8 +76,11 @@ var ( APIEndpointsAdminGetListenerByID = ffm("api.endpoints.adminGetListenerByID", "Gets a contract listener by ID") APIEndpointsAdminGetListeners = ffm("api.endpoints.adminGetListeners", "Lists contract listeners") + APIEndpointsDeleteContractAPI = ffm("api.endpoints.deleteContractAPI", "Delete a contract API") + APIEndpointsDeleteContractInterface = ffm("api.endpoints.deleteContractInterface", "Delete a contract interface") APIEndpointsDeleteContractListener = ffm("api.endpoints.deleteContractListener", "Deletes a contract listener referenced by its name or its ID") APIEndpointsDeleteSubscription = ffm("api.endpoints.deleteSubscription", "Deletes a subscription") + APIEndpointsDeleteTokenPool = ffm("api.endpoints.deleteTokenPool", "Delete a token pool") APIEndpointsGetBatchBbyID = ffm("api.endpoints.getBatchByID", "Gets a message batch") APIEndpointsGetBatches = ffm("api.endpoints.getBatches", "Gets a list of message batches") APIEndpointsGetBlockchainEventByID = ffm("api.endpoints.getBlockchainEventByID", "Gets a blockchain event") @@ -150,10 +153,12 @@ var ( APIEndpointsPatchUpdateIdentity = 
ffm("api.endpoints.patchUpdateIdentity", "Updates an identity") APIEndpointsPostContractDeploy = ffm("api.endpoints.postContractDeploy", "Deploy a new smart contract") APIEndpointsPostContractAPIInvoke = ffm("api.endpoints.postContractAPIInvoke", "Invokes a method on a smart contract API. Performs a blockchain transaction.") + APIEndpointsPostContractAPIPublish = ffm("api.endpoints.postContractAPIPublish", "Publish a contract API to all other members of the multiparty network") APIEndpointsPostContractAPIQuery = ffm("api.endpoints.postContractAPIQuery", "Queries a method on a smart contract API. Performs a read-only query.") APIEndpointsPostContractInterfaceGenerate = ffm("api.endpoints.postContractInterfaceGenerate", "A convenience method to convert a blockchain specific smart contract format into a FireFly Interface format. The specific blockchain plugin in use must support this functionality.") APIEndpointsPostContractInterfaceInvoke = ffm("api.endpoints.postContractInterfaceInvoke", "Invokes a method on a smart contract that matches a given contract interface. Performs a blockchain transaction.") APIEndpointsPostContractInterfaceQuery = ffm("api.endpoints.postContractInterfaceQuery", "Queries a method on a smart contract that matches a given contract interface. Performs a read-only query.") + APIEndpointsPostContractInterfacePublish = ffm("api.endpoints.postContractInterfacePublish", "Publish a contract interface to all other members of the multiparty network") APIEndpointsPostContractInvoke = ffm("api.endpoints.postContractInvoke", "Invokes a method on a smart contract. Performs a blockchain transaction.") APIEndpointsPostContractQuery = ffm("api.endpoints.postContractQuery", "Queries a method on a smart contract. Performs a read-only query.") APIEndpointsPostData = ffm("api.endpoints.postData", "Creates a new data item in this FireFly node") @@ -178,6 +183,7 @@ var ( APIEndpointsPostTokenBurn = ffm("api.endpoints.postTokenBurn", "Burns some tokens") APIEndpointsPostTokenMint = ffm("api.endpoints.postTokenMint", "Mints some tokens") APIEndpointsPostTokenPool = ffm("api.endpoints.postTokenPool", "Creates a new token pool") + APIEndpointsPostTokenPoolPublish = ffm("api.endpoints.postTokenPoolPublish", "Publish a token pool to all other members of the multiparty network") APIEndpointsPostTokenTransfer = ffm("api.endpoints.postTokenTransfer", "Transfers some tokens") APIEndpointsPutContractAPI = ffm("api.endpoints.putContractAPI", "Updates an existing contract API") APIEndpointsPutSubscription = ffm("api.endpoints.putSubscription", "Update an existing subscription") @@ -194,6 +200,7 @@ var ( APIFilterCountDesc = ffm("api.filterCount", "Return a total count as well as items (adds extra database processing)") APIFetchDataDesc = ffm("api.fetchData", "Fetch the data and include it in the messages returned") APIConfirmQueryParam = ffm("api.confirmQueryParam", "When true the HTTP request blocks until the message is confirmed") + APIPublishQueryParam = ffm("api.publishQueryParam", "When true the definition will be published to all other members of the multiparty network") APIHistogramStartTimeParam = ffm("api.histogramStartTime", "Start time of the data to be fetched") APIHistogramEndTimeParam = ffm("api.histogramEndTime", "End time of the data to be fetched") APIHistogramBucketsParam = ffm("api.histogramBuckets", "Number of buckets between start time and end time") diff --git a/internal/coremsgs/en_config_descriptions.go b/internal/coremsgs/en_config_descriptions.go index 4870369d5f..a148b5b2b9 
100644 --- a/internal/coremsgs/en_config_descriptions.go +++ b/internal/coremsgs/en_config_descriptions.go @@ -25,6 +25,9 @@ var ffc = func(key, translation, fieldType string) i18n.ConfigMessageKey { return i18n.FFC(language.AmericanEnglish, key, translation, fieldType) } +var urlStringType = "URL " + i18n.StringType +var addressStringType = "Address " + i18n.StringType + //revive:disable var ( ConfigGlobalMigrationsAuto = ffc("config.global.migrations.auto", "Enables automatic database migrations", i18n.BooleanType) @@ -41,7 +44,7 @@ var ( ConfigSPIAddress = ffc("config.spi.address", "The IP address on which the admin HTTP API should listen", "IP Address "+i18n.StringType) ConfigSPIEnabled = ffc("config.spi.enabled", "Enables the admin HTTP API", i18n.BooleanType) ConfigSPIPort = ffc("config.spi.port", "The port on which the admin HTTP API should listen", i18n.IntType) - ConfigSPIPublicURL = ffc("config.spi.publicURL", "The fully qualified public URL for the admin API. This is used for building URLs in HTTP responses and in OpenAPI Spec generation", "URL "+i18n.StringType) + ConfigSPIPublicURL = ffc("config.spi.publicURL", "The fully qualified public URL for the admin API. This is used for building URLs in HTTP responses and in OpenAPI Spec generation", urlStringType) ConfigSPIReadTimeout = ffc("config.spi.readTimeout", "The maximum time to wait when reading from an HTTP connection", i18n.TimeDurationType) ConfigSPIWriteTimeout = ffc("config.spi.writeTimeout", "The maximum time to wait when writing to an HTTP connection", i18n.TimeDurationType) @@ -75,17 +78,17 @@ var ( ConfigBlockchainEthereumAddressResolverRetainOriginal = ffc("config.blockchain.ethereum.addressResolver.retainOriginal", "When true the original pre-resolved string is retained after the lookup, and passed down to Ethconnect as the from address", i18n.BooleanType) ConfigBlockchainEthereumAddressResolverURL = ffc("config.blockchain.ethereum.addressResolver.url", "The URL of the Address Resolver", i18n.StringType) ConfigBlockchainEthereumAddressResolverURLTemplate = ffc("config.blockchain.ethereum.addressResolver.urlTemplate", "The URL Go template string to use when calling the Address Resolver. The template input contains '.Key' and '.Intent' string variables", i18n.GoTemplateType) - ConfigBlockchainEthereumAddressResolverProxyURL = ffc("config.blockchain.ethereum.addressResolver.proxy.url", "Optional HTTP proxy server to use when connecting to the Address Resolver", "URL "+i18n.StringType) + ConfigBlockchainEthereumAddressResolverProxyURL = ffc("config.blockchain.ethereum.addressResolver.proxy.url", "Optional HTTP proxy server to use when connecting to the Address Resolver", urlStringType) ConfigBlockchainEthereumEthconnectBatchSize = ffc("config.blockchain.ethereum.ethconnect.batchSize", "The number of events Ethconnect should batch together for delivery to FireFly core. Only applies when automatically creating a new event stream", i18n.IntType) ConfigBlockchainEthereumEthconnectBatchTimeout = ffc("config.blockchain.ethereum.ethconnect.batchTimeout", "How long Ethconnect should wait for new events to arrive and fill a batch, before sending the batch to FireFly core. 
Only applies when automatically creating a new event stream", i18n.TimeDurationType) - ConfigBlockchainEthereumEthconnectInstance = ffc("config.blockchain.ethereum.ethconnect.instance", "The Ethereum address of the FireFly BatchPin smart contract that has been deployed to the blockchain (deprecated - use namespaces.predefined[].multiparty.contract[].location.address)", "Address "+i18n.StringType) - ConfigBlockchainEthereumEthconnectFromBlock = ffc("config.blockchain.ethereum.ethconnect.fromBlock", "The first event this FireFly instance should listen to from the BatchPin smart contract. Default=0. Only affects initial creation of the event stream (deprecated - use namespaces.predefined[].multiparty.contract[].location.firstEvent)", "Address "+i18n.StringType) + ConfigBlockchainEthereumEthconnectInstance = ffc("config.blockchain.ethereum.ethconnect.instance", "The Ethereum address of the FireFly BatchPin smart contract that has been deployed to the blockchain (deprecated - use namespaces.predefined[].multiparty.contract[].location.address)", addressStringType) + ConfigBlockchainEthereumEthconnectFromBlock = ffc("config.blockchain.ethereum.ethconnect.fromBlock", "The first event this FireFly instance should listen to from the BatchPin smart contract. Default=0. Only affects initial creation of the event stream (deprecated - use namespaces.predefined[].multiparty.contract[].location.firstEvent)", addressStringType) ConfigBlockchainEthereumEthconnectPrefixLong = ffc("config.blockchain.ethereum.ethconnect.prefixLong", "The prefix that will be used for Ethconnect specific HTTP headers when FireFly makes requests to Ethconnect", i18n.StringType) ConfigBlockchainEthereumEthconnectPrefixShort = ffc("config.blockchain.ethereum.ethconnect.prefixShort", "The prefix that will be used for Ethconnect specific query parameters when FireFly makes requests to Ethconnect", i18n.StringType) ConfigBlockchainEthereumEthconnectTopic = ffc("config.blockchain.ethereum.ethconnect.topic", "The websocket listen topic that the node should register on, which is important if there are multiple nodes using a single ethconnect", i18n.StringType) - ConfigBlockchainEthereumEthconnectURL = ffc("config.blockchain.ethereum.ethconnect.url", "The URL of the Ethconnect instance", "URL "+i18n.StringType) - ConfigBlockchainEthereumEthconnectProxyURL = ffc("config.blockchain.ethereum.ethconnect.proxy.url", "Optional HTTP proxy server to use when connecting to Ethconnect", "URL "+i18n.StringType) + ConfigBlockchainEthereumEthconnectURL = ffc("config.blockchain.ethereum.ethconnect.url", "The URL of the Ethconnect instance", urlStringType) + ConfigBlockchainEthereumEthconnectProxyURL = ffc("config.blockchain.ethereum.ethconnect.proxy.url", "Optional HTTP proxy server to use when connecting to Ethconnect", urlStringType) ConfigBlockchainEthereumFFTMURL = ffc("config.blockchain.ethereum.fftm.url", "The URL of the FireFly Transaction Manager runtime, if enabled", i18n.StringType) ConfigBlockchainEthereumFFTMProxyURL = ffc("config.blockchain.ethereum.fftm.proxy.url", "Optional HTTP proxy server to use when connecting to the Transaction Manager", i18n.StringType) @@ -98,8 +101,8 @@ var ( ConfigBlockchainFabricFabconnectPrefixShort = ffc("config.blockchain.fabric.fabconnect.prefixShort", "The prefix that will be used for Fabconnect specific query parameters when FireFly makes requests to Fabconnect", i18n.StringType) ConfigBlockchainFabricFabconnectSigner = ffc("config.blockchain.fabric.fabconnect.signer", "The Fabric signing key to use when 
submitting transactions to Fabconnect", i18n.StringType) ConfigBlockchainFabricFabconnectTopic = ffc("config.blockchain.fabric.fabconnect.topic", "The websocket listen topic that the node should register on, which is important if there are multiple nodes using a single Fabconnect", i18n.StringType) - ConfigBlockchainFabricFabconnectURL = ffc("config.blockchain.fabric.fabconnect.url", "The URL of the Fabconnect instance", "URL "+i18n.StringType) - ConfigBlockchainFabricFabconnectProxyURL = ffc("config.blockchain.fabric.fabconnect.proxy.url", "Optional HTTP proxy server to use when connecting to Fabconnect", "URL "+i18n.StringType) + ConfigBlockchainFabricFabconnectURL = ffc("config.blockchain.fabric.fabconnect.url", "The URL of the Fabconnect instance", urlStringType) + ConfigBlockchainFabricFabconnectProxyURL = ffc("config.blockchain.fabric.fabconnect.proxy.url", "Optional HTTP proxy server to use when connecting to Fabconnect", urlStringType) ConfigCacheEnabled = ffc("config.cache.enabled", "Enables caching, defaults to true", i18n.BooleanType) @@ -127,6 +130,10 @@ var ( ConfigCacheBlockchainTTL = ffc("config.cache.blockchain.ttl", "Time to live of cached items for blockchain", i18n.StringType) ConfigCacheOperationsLimit = ffc("config.cache.operations.limit", "Max number of cached items for operations", i18n.IntType) ConfigCacheOperationsTTL = ffc("config.cache.operations.ttl", "Time to live of cached items for operations", i18n.StringType) + ConfigCacheTokenPoolLimit = ffc("config.cache.tokenpool.limit", "Max number of cached items for token pools", i18n.IntType) + ConfigCacheTokenPoolTTL = ffc("config.cache.tokenpool.ttl", "Time to live of cached items for token pool", i18n.StringType) + ConfigCacheMethodsLimit = ffc("config.cache.methods.limit", "Max number of cached items for schema validations on blockchain methods", i18n.IntType) + ConfigCacheMethodsTTL = ffc("config.cache.methods.ttl", "Time to live of cached items for schema validations on blockchain methods", i18n.StringType) ConfigPluginDatabase = ffc("config.plugins.database", "The list of configured Database plugins", i18n.StringType) ConfigPluginDatabaseName = ffc("config.plugins.database[].name", "The name of the Database plugin", i18n.StringType) @@ -162,31 +169,60 @@ var ( ConfigPluginBlockchainEthereumAddressResolverURL = ffc("config.plugins.blockchain[].ethereum.addressResolver.url", "The URL of the Address Resolver", i18n.StringType) ConfigPluginBlockchainEthereumAddressResolverURLTemplate = ffc("config.plugins.blockchain[].ethereum.addressResolver.urlTemplate", "The URL Go template string to use when calling the Address Resolver. The template input contains '.Key' and '.Intent' string variables.", i18n.GoTemplateType) - ConfigPluginBlockchainEthereumAddressResolverProxyURL = ffc("config.plugins.blockchain[].ethereum.addressResolver.proxy.url", "Optional HTTP proxy server to use when connecting to the Address Resolver", "URL "+i18n.StringType) - - ConfigPluginBlockchainEthereumEthconnectBatchSize = ffc("config.plugins.blockchain[].ethereum.ethconnect.batchSize", "The number of events Ethconnect should batch together for delivery to FireFly core. Only applies when automatically creating a new event stream", i18n.IntType) - ConfigPluginBlockchainEthereumEthconnectBatchTimeout = ffc("config.plugins.blockchain[].ethereum.ethconnect.batchTimeout", "How long Ethconnect should wait for new events to arrive and fill a batch, before sending the batch to FireFly core. 
Only applies when automatically creating a new event stream", i18n.TimeDurationType) - ConfigPluginBlockchainEthereumEthconnectInstance = ffc("config.plugins.blockchain[].ethereum.ethconnect.instance", "The Ethereum address of the FireFly BatchPin smart contract that has been deployed to the blockchain", "Address "+i18n.StringType) - ConfigPluginBlockchainEthereumEthconnectFromBlock = ffc("config.plugins.blockchain[].ethereum.ethconnect.fromBlock", "The first event this FireFly instance should listen to from the BatchPin smart contract. Default=0. Only affects initial creation of the event stream", "Address "+i18n.StringType) - ConfigPluginBlockchainEthereumEthconnectPrefixLong = ffc("config.plugins.blockchain[].ethereum.ethconnect.prefixLong", "The prefix that will be used for Ethconnect specific HTTP headers when FireFly makes requests to Ethconnect", i18n.StringType) - ConfigPluginBlockchainEthereumEthconnectPrefixShort = ffc("config.plugins.blockchain[].ethereum.ethconnect.prefixShort", "The prefix that will be used for Ethconnect specific query parameters when FireFly makes requests to Ethconnect", i18n.StringType) - ConfigPluginBlockchainEthereumEthconnectTopic = ffc("config.plugins.blockchain[].ethereum.ethconnect.topic", "The websocket listen topic that the node should register on, which is important if there are multiple nodes using a single ethconnect", i18n.StringType) - ConfigPluginBlockchainEthereumEthconnectURL = ffc("config.plugins.blockchain[].ethereum.ethconnect.url", "The URL of the Ethconnect instance", "URL "+i18n.StringType) - ConfigPluginBlockchainEthereumEthconnectProxyURL = ffc("config.plugins.blockchain[].ethereum.ethconnect.proxy.url", "Optional HTTP proxy server to use when connecting to Ethconnect", "URL "+i18n.StringType) + ConfigPluginBlockchainEthereumAddressResolverProxyURL = ffc("config.plugins.blockchain[].ethereum.addressResolver.proxy.url", "Optional HTTP proxy server to use when connecting to the Address Resolver", urlStringType) + + ConfigPluginBlockchainEthereumEthconnectBackgroundStart = ffc("config.plugins.blockchain[].ethereum.ethconnect.backgroundStart.enabled", "Start the Ethconnect plugin in the background and enter retry loop if failed to start", i18n.BooleanType) + ConfigPluginBlockchainEthereumEthconnectBackgroundStartInitialDelay = ffc("config.plugins.blockchain[].ethereum.ethconnect.backgroundStart.initialDelay", "Delay between restarts in the case where we retry to restart the ethereum plugin", i18n.TimeDurationType) + ConfigPluginBlockchainEthereumEthconnectBackgroundStartMaxDelay = ffc("config.plugins.blockchain[].ethereum.ethconnect.backgroundStart.maxDelay", "Max delay between restarts in the case where we retry to restart the ethereum plugin", i18n.TimeDurationType) + ConfigPluginBlockchainEthereumEthconnectBackgroundStartFactor = ffc("config.plugins.blockchain[].ethereum.ethconnect.backgroundStart.factor", "Set the factor by which the delay increases when retrying", i18n.FloatType) + ConfigPluginBlockchainEthereumEthconnectBatchSize = ffc("config.plugins.blockchain[].ethereum.ethconnect.batchSize", "The number of events Ethconnect should batch together for delivery to FireFly core. Only applies when automatically creating a new event stream", i18n.IntType) + ConfigPluginBlockchainEthereumEthconnectBatchTimeout = ffc("config.plugins.blockchain[].ethereum.ethconnect.batchTimeout", "How long Ethconnect should wait for new events to arrive and fill a batch, before sending the batch to FireFly core. 
Only applies when automatically creating a new event stream", i18n.TimeDurationType) + ConfigPluginBlockchainEthereumEthconnectInstance = ffc("config.plugins.blockchain[].ethereum.ethconnect.instance", "The Ethereum address of the FireFly BatchPin smart contract that has been deployed to the blockchain", addressStringType) + ConfigPluginBlockchainEthereumEthconnectFromBlock = ffc("config.plugins.blockchain[].ethereum.ethconnect.fromBlock", "The first event this FireFly instance should listen to from the BatchPin smart contract. Default=0. Only affects initial creation of the event stream", addressStringType) + ConfigPluginBlockchainEthereumEthconnectPrefixLong = ffc("config.plugins.blockchain[].ethereum.ethconnect.prefixLong", "The prefix that will be used for Ethconnect specific HTTP headers when FireFly makes requests to Ethconnect", i18n.StringType) + ConfigPluginBlockchainEthereumEthconnectPrefixShort = ffc("config.plugins.blockchain[].ethereum.ethconnect.prefixShort", "The prefix that will be used for Ethconnect specific query parameters when FireFly makes requests to Ethconnect", i18n.StringType) + ConfigPluginBlockchainEthereumEthconnectTopic = ffc("config.plugins.blockchain[].ethereum.ethconnect.topic", "The websocket listen topic that the node should register on, which is important if there are multiple nodes using a single ethconnect", i18n.StringType) + ConfigPluginBlockchainEthereumEthconnectURL = ffc("config.plugins.blockchain[].ethereum.ethconnect.url", "The URL of the Ethconnect instance", urlStringType) + ConfigPluginBlockchainEthereumEthconnectProxyURL = ffc("config.plugins.blockchain[].ethereum.ethconnect.proxy.url", "Optional HTTP proxy server to use when connecting to Ethconnect", urlStringType) ConfigPluginBlockchainEthereumFFTMURL = ffc("config.plugins.blockchain[].ethereum.fftm.url", "The URL of the FireFly Transaction Manager runtime, if enabled", i18n.StringType) ConfigPluginBlockchainEthereumFFTMProxyURL = ffc("config.plugins.blockchain[].ethereum.fftm.proxy.url", "Optional HTTP proxy server to use when connecting to the Transaction Manager", i18n.StringType) - ConfigPluginBlockchainFabricFabconnectBatchSize = ffc("config.plugins.blockchain[].fabric.fabconnect.batchSize", "The number of events Fabconnect should batch together for delivery to FireFly core. 
Only applies when automatically creating a new event stream", i18n.IntType) - ConfigPluginBlockchainFabricFabconnectBatchTimeout = ffc("config.plugins.blockchain[].fabric.fabconnect.batchTimeout", "The maximum amount of time to wait for a batch to complete", i18n.TimeDurationType) - ConfigPluginBlockchainFabricFabconnectPrefixLong = ffc("config.plugins.blockchain[].fabric.fabconnect.prefixLong", "The prefix that will be used for Fabconnect specific HTTP headers when FireFly makes requests to Fabconnect", i18n.StringType) - ConfigPluginBlockchainFabricFabconnectPrefixShort = ffc("config.plugins.blockchain[].fabric.fabconnect.prefixShort", "The prefix that will be used for Fabconnect specific query parameters when FireFly makes requests to Fabconnect", i18n.StringType) - ConfigPluginBlockchainFabricFabconnectSigner = ffc("config.plugins.blockchain[].fabric.fabconnect.signer", "The Fabric signing key to use when submitting transactions to Fabconnect", i18n.StringType) - ConfigPluginBlockchainFabricFabconnectTopic = ffc("config.plugins.blockchain[].fabric.fabconnect.topic", "The websocket listen topic that the node should register on, which is important if there are multiple nodes using a single Fabconnect", i18n.StringType) - ConfigPluginBlockchainFabricFabconnectURL = ffc("config.plugins.blockchain[].fabric.fabconnect.url", "The URL of the Fabconnect instance", "URL "+i18n.StringType) - ConfigPluginBlockchainFabricFabconnectProxyURL = ffc("config.plugins.blockchain[].fabric.fabconnect.proxy.url", "Optional HTTP proxy server to use when connecting to Fabconnect", "URL "+i18n.StringType) - ConfigPluginBlockchainFabricFabconnectChaincode = ffc("config.plugins.blockchain[].fabric.fabconnect.chaincode", "The name of the Fabric chaincode that FireFly will use for BatchPin transactions (deprecated - use fireflyContract[].chaincode)", i18n.StringType) - ConfigPluginBlockchainFabricFabconnectChannel = ffc("config.plugins.blockchain[].fabric.fabconnect.channel", "The Fabric channel that FireFly will use for BatchPin transactions", i18n.StringType) + ConfigPluginBlockchainTezosAddressResolverAlwaysResolve = ffc("config.plugins.blockchain[].tezos.addressResolver.alwaysResolve", "Causes the address resolver to be invoked on every API call that submits a signing key. Also disables any result caching", i18n.BooleanType) + + ConfigPluginBlockchainTezosAddressResolverResponseField = ffc("config.plugins.blockchain[].tezos.addressResolver.responseField", "The name of a JSON field that is provided in the response, that contains the tezos address (default `address`)", i18n.StringType) + ConfigPluginBlockchainTezosAddressResolverRetainOriginal = ffc("config.plugins.blockchain[].tezos.addressResolver.retainOriginal", "When true the original pre-resolved string is retained after the lookup, and passed down to Tezosconnect as the from address", i18n.BooleanType) + ConfigPluginBlockchainTezosAddressResolverURL = ffc("config.plugins.blockchain[].tezos.addressResolver.url", "The URL of the Address Resolver", i18n.StringType) + ConfigPluginBlockchainTezosAddressResolverURLTemplate = ffc("config.plugins.blockchain[].tezos.addressResolver.urlTemplate", "The URL Go template string to use when calling the Address Resolver. 
The template input contains '.Key' and '.Intent' string variables.", i18n.GoTemplateType) + + ConfigPluginBlockchainTezosTezosconnectBackgroundStart = ffc("config.plugins.blockchain[].tezos.tezosconnect.backgroundStart.enabled", "Start the Tezosconnect plugin in the background and enter retry loop if failed to start", i18n.BooleanType) + ConfigPluginBlockchainTezosTezosconnectBackgroundStartInitialDelay = ffc("config.plugins.blockchain[].tezos.tezosconnect.backgroundStart.initialDelay", "Delay between restarts in the case where we retry to restart the tezos plugin", i18n.TimeDurationType) + ConfigPluginBlockchainTezosTezosconnectBackgroundStartMaxDelay = ffc("config.plugins.blockchain[].tezos.tezosconnect.backgroundStart.maxDelay", "Max delay between restarts in the case where we retry to restart the tezos plugin", i18n.TimeDurationType) + ConfigPluginBlockchainTezosTezosconnectBackgroundStartFactor = ffc("config.plugins.blockchain[].tezos.tezosconnect.backgroundStart.factor", "Set the factor by which the delay increases when retrying", i18n.FloatType) + ConfigPluginBlockchainTezosTezosconnectBatchSize = ffc("config.plugins.blockchain[].tezos.tezosconnect.batchSize", "The number of events Tezosconnect should batch together for delivery to FireFly core. Only applies when automatically creating a new event stream", i18n.IntType) + ConfigPluginBlockchainTezosTezosconnectBatchTimeout = ffc("config.plugins.blockchain[].tezos.tezosconnect.batchTimeout", "How long Tezosconnect should wait for new events to arrive and fill a batch, before sending the batch to FireFly core. Only applies when automatically creating a new event stream", i18n.TimeDurationType) + ConfigPluginBlockchainTezosTezosconnectInstance = ffc("config.plugins.blockchain[].tezos.tezosconnect.instance", "The Tezos address of the FireFly BatchPin smart contract that has been deployed to the blockchain", addressStringType) + ConfigPluginBlockchainTezosTezosconnectFromBlock = ffc("config.plugins.blockchain[].tezos.tezosconnect.fromBlock", "The first event this FireFly instance should listen to from the BatchPin smart contract. Default=0. 
Only affects initial creation of the event stream", addressStringType) + ConfigPluginBlockchainTezosTezosconnectPrefixLong = ffc("config.plugins.blockchain[].tezos.tezosconnect.prefixLong", "The prefix that will be used for Tezosconnect specific HTTP headers when FireFly makes requests to Tezosconnect", i18n.StringType) + ConfigPluginBlockchainTezosTezosconnectPrefixShort = ffc("config.plugins.blockchain[].tezos.tezosconnect.prefixShort", "The prefix that will be used for Tezosconnect specific query parameters when FireFly makes requests to Tezosconnect", i18n.StringType) + ConfigPluginBlockchainTezosTezosconnectTopic = ffc("config.plugins.blockchain[].tezos.tezosconnect.topic", "The websocket listen topic that the node should register on, which is important if there are multiple nodes using a single tezosconnect", i18n.StringType) + ConfigPluginBlockchainTezosTezosconnectURL = ffc("config.plugins.blockchain[].tezos.tezosconnect.url", "The URL of the Tezosconnect instance", urlStringType) + ConfigPluginBlockchainTezosTezosconnectProxyURL = ffc("config.plugins.blockchain[].tezos.tezosconnect.proxy.url", "Optional HTTP proxy server to use when connecting to Tezosconnect", urlStringType) + + ConfigPluginBlockchainFabricFabconnectBackgroundStart = ffc("config.plugins.blockchain[].fabric.fabconnect.backgroundStart.enabled", "Start the fabric plugin in the background and enter retry loop if failed to start", i18n.BooleanType) + ConfigPluginBlockchainFabricFabconnectBackgroundStartInitialDelay = ffc("config.plugins.blockchain[].fabric.fabconnect.backgroundStart.initialDelay", "Delay between restarts in the case where we retry to restart the fabric plugin", i18n.TimeDurationType) + ConfigPluginBlockchainFabricFabconnectBackgroundStartMaxDelay = ffc("config.plugins.blockchain[].fabric.fabconnect.backgroundStart.maxDelay", "Max delay between restarts in the case where we retry to restart the fabric plugin", i18n.TimeDurationType) + ConfigPluginBlockchainFabricFabconnectBackgroundStartFactor = ffc("config.plugins.blockchain[].fabric.fabconnect.backgroundStart.factor", "Set the factor by which the delay increases when retrying", i18n.FloatType) + ConfigPluginBlockchainFabricFabconnectBatchSize = ffc("config.plugins.blockchain[].fabric.fabconnect.batchSize", "The number of events Fabconnect should batch together for delivery to FireFly core. 
Only applies when automatically creating a new event stream", i18n.IntType) + ConfigPluginBlockchainFabricFabconnectBatchTimeout = ffc("config.plugins.blockchain[].fabric.fabconnect.batchTimeout", "The maximum amount of time to wait for a batch to complete", i18n.TimeDurationType) + ConfigPluginBlockchainFabricFabconnectPrefixLong = ffc("config.plugins.blockchain[].fabric.fabconnect.prefixLong", "The prefix that will be used for Fabconnect specific HTTP headers when FireFly makes requests to Fabconnect", i18n.StringType) + ConfigPluginBlockchainFabricFabconnectPrefixShort = ffc("config.plugins.blockchain[].fabric.fabconnect.prefixShort", "The prefix that will be used for Fabconnect specific query parameters when FireFly makes requests to Fabconnect", i18n.StringType) + ConfigPluginBlockchainFabricFabconnectSigner = ffc("config.plugins.blockchain[].fabric.fabconnect.signer", "The Fabric signing key to use when submitting transactions to Fabconnect", i18n.StringType) + ConfigPluginBlockchainFabricFabconnectTopic = ffc("config.plugins.blockchain[].fabric.fabconnect.topic", "The websocket listen topic that the node should register on, which is important if there are multiple nodes using a single Fabconnect", i18n.StringType) + ConfigPluginBlockchainFabricFabconnectURL = ffc("config.plugins.blockchain[].fabric.fabconnect.url", "The URL of the Fabconnect instance", urlStringType) + ConfigPluginBlockchainFabricFabconnectProxyURL = ffc("config.plugins.blockchain[].fabric.fabconnect.proxy.url", "Optional HTTP proxy server to use when connecting to Fabconnect", urlStringType) + ConfigPluginBlockchainFabricFabconnectChaincode = ffc("config.plugins.blockchain[].fabric.fabconnect.chaincode", "The name of the Fabric chaincode that FireFly will use for BatchPin transactions (deprecated - use fireflyContract[].chaincode)", i18n.StringType) + ConfigPluginBlockchainFabricFabconnectChannel = ffc("config.plugins.blockchain[].fabric.fabconnect.channel", "The Fabric channel that FireFly will use for BatchPin transactions", i18n.StringType) ConfigBroadcastBatchAgentTimeout = ffc("config.broadcast.batch.agentTimeout", "How long to keep around a batching agent for a sending identity before disposal", i18n.StringType) ConfigBroadcastBatchPayloadLimit = ffc("config.broadcast.batch.payloadLimit", "The maximum payload size of a batch for broadcast messages", i18n.ByteSizeType) @@ -211,19 +247,23 @@ var ( ConfigDataexchangeFfdxInitEnabled = ffc("config.dataexchange.ffdx.initEnabled", "Instructs FireFly to always post all current nodes to the `/init` API before connecting or reconnecting to the connector", i18n.BooleanType) ConfigDataexchangeFfdxManifestEnabled = ffc("config.dataexchange.ffdx.manifestEnabled", "Determines whether to require+validate a manifest from other DX instances in the network. 
Must be supported by the connector", i18n.StringType) - ConfigDataexchangeFfdxURL = ffc("config.dataexchange.ffdx.url", "The URL of the Data Exchange instance", "URL "+i18n.StringType) + ConfigDataexchangeFfdxURL = ffc("config.dataexchange.ffdx.url", "The URL of the Data Exchange instance", urlStringType) - ConfigDataexchangeFfdxProxyURL = ffc("config.dataexchange.ffdx.proxy.url", "Optional HTTP proxy server to use when connecting to the Data Exchange", "URL "+i18n.StringType) + ConfigDataexchangeFfdxProxyURL = ffc("config.dataexchange.ffdx.proxy.url", "Optional HTTP proxy server to use when connecting to the Data Exchange", urlStringType) ConfigPluginDataexchange = ffc("config.plugins.dataexchange", "The array of configured Data Exchange plugins ", i18n.StringType) ConfigPluginDataexchangeType = ffc("config.plugins.dataexchange[].type", "The Data Exchange plugin to use", i18n.StringType) ConfigPluginDataexchangeName = ffc("config.plugins.dataexchange[].name", "The name of the configured Data Exchange plugin", i18n.StringType) - ConfigPluginDataexchangeFfdxInitEnabled = ffc("config.plugins.dataexchange[].ffdx.initEnabled", "Instructs FireFly to always post all current nodes to the `/init` API before connecting or reconnecting to the connector", i18n.BooleanType) - ConfigPluginDataexchangeFfdxManifestEnabled = ffc("config.plugins.dataexchange[].ffdx.manifestEnabled", "Determines whether to require+validate a manifest from other DX instances in the network. Must be supported by the connector", i18n.StringType) - ConfigPluginDataexchangeFfdxURL = ffc("config.plugins.dataexchange[].ffdx.url", "The URL of the Data Exchange instance", "URL "+i18n.StringType) + ConfigPluginDataexchangeFfdxInitEnabled = ffc("config.plugins.dataexchange[].ffdx.initEnabled", "Instructs FireFly to always post all current nodes to the `/init` API before connecting or reconnecting to the connector", i18n.BooleanType) + ConfigPluginDataexchangeFfdxManifestEnabled = ffc("config.plugins.dataexchange[].ffdx.manifestEnabled", "Determines whether to require+validate a manifest from other DX instances in the network. 
Must be supported by the connector", i18n.StringType) + ConfigPluginDataexchangeFfdxURL = ffc("config.plugins.dataexchange[].ffdx.url", "The URL of the Data Exchange instance", urlStringType) + ConfigPluginDataexchangeFfdxBackgroundStart = ffc("config.plugins.dataexchange[].ffdx.backgroundStart.enabled", "Start the data exchange plugin in the background and enter retry loop if failed to start", i18n.BooleanType) + ConfigPluginDataexchangeFfdxBackgroundStartInitialDelay = ffc("config.plugins.dataexchange[].ffdx.backgroundStart.initialDelay", "Delay between restarts in the case where we retry to restart the data exchange plugin", i18n.TimeDurationType) + ConfigPluginDataexchangeFfdxBackgroundStartMaxDelay = ffc("config.plugins.dataexchange[].ffdx.backgroundStart.maxDelay", "Max delay between restarts in the case where we retry to restart the data exchange plugin", i18n.TimeDurationType) + ConfigPluginDataexchangeFfdxBackgroundStartFactor = ffc("config.plugins.dataexchange[].ffdx.backgroundStart.factor", "Set the factor by which the delay increases when retrying", i18n.FloatType) - ConfigPluginDataexchangeFfdxProxyURL = ffc("config.plugins.dataexchange[].ffdx.proxy.url", "Optional HTTP proxy server to use when connecting to the Data Exchange", "URL "+i18n.StringType) + ConfigPluginDataexchangeFfdxProxyURL = ffc("config.plugins.dataexchange[].ffdx.proxy.url", "Optional HTTP proxy server to use when connecting to the Data Exchange", urlStringType) ConfigDebugPort = ffc("config.debug.port", "An HTTP port on which to enable the go debugger", i18n.IntType) ConfigDebugAddress = ffc("config.debug.address", "The HTTP interface the go debugger binds to", i18n.StringType) @@ -251,7 +291,7 @@ var ( ConfigHTTPAddress = ffc("config.http.address", "The IP address on which the HTTP API should listen", "IP Address "+i18n.StringType) ConfigHTTPPort = ffc("config.http.port", "The port on which the HTTP API should listen", i18n.IntType) - ConfigHTTPPublicURL = ffc("config.http.publicURL", "The fully qualified public URL for the API. This is used for building URLs in HTTP responses and in OpenAPI Spec generation", "URL "+i18n.StringType) + ConfigHTTPPublicURL = ffc("config.http.publicURL", "The fully qualified public URL for the API. 
This is used for building URLs in HTTP responses and in OpenAPI Spec generation", urlStringType) ConfigHTTPReadTimeout = ffc("config.http.readTimeout", "The maximum time to wait when reading from an HTTP connection", i18n.TimeDurationType) ConfigHTTPWriteTimeout = ffc("config.http.writeTimeout", "The maximum time to wait when writing to an HTTP connection", i18n.TimeDurationType) @@ -276,21 +316,28 @@ var ( ConfigMessageWriterBatchTimeout = ffc("config.message.writer.batchTimeout", "How long to wait for more messages to arrive before flushing the batch", i18n.TimeDurationType) ConfigMessageWriterCount = ffc("config.message.writer.count", "The number of message writer workers", i18n.IntType) + ConfigTransactionWriterBatchMaxTransactions = ffc("config.transaction.writer.batchMaxTransactions", "The maximum number of transaction inserts to include in a batch", i18n.IntType) + ConfigTransactionWriterBatchTimeout = ffc("config.transaction.writer.batchTimeout", "How long to wait for more transactions to arrive before flushing the batch", i18n.TimeDurationType) + ConfigTransactionWriterCount = ffc("config.transaction.writer.count", "The number of transaction writer workers", i18n.IntType) + ConfigMetricsAddress = ffc("config.metrics.address", "The IP address on which the metrics HTTP API should listen", i18n.IntType) ConfigMetricsEnabled = ffc("config.metrics.enabled", "Enables the metrics API", i18n.BooleanType) ConfigMetricsPath = ffc("config.metrics.path", "The path from which to serve the Prometheus metrics", i18n.StringType) ConfigMetricsPort = ffc("config.metrics.port", "The port on which the metrics HTTP API should listen", i18n.IntType) - ConfigMetricsPublicURL = ffc("config.metrics.publicURL", "The fully qualified public URL for the metrics API. This is used for building URLs in HTTP responses and in OpenAPI Spec generation", "URL "+i18n.StringType) + ConfigMetricsPublicURL = ffc("config.metrics.publicURL", "The fully qualified public URL for the metrics API. This is used for building URLs in HTTP responses and in OpenAPI Spec generation", urlStringType) ConfigMetricsReadTimeout = ffc("config.metrics.readTimeout", "The maximum time to wait when reading from an HTTP connection", i18n.TimeDurationType) ConfigMetricsWriteTimeout = ffc("config.metrics.writeTimeout", "The maximum time to wait when writing to an HTTP connection", i18n.TimeDurationType) - ConfigNamespacesDefault = ffc("config.namespaces.default", "The default namespace - must be in the predefined list", i18n.StringType) - ConfigNamespacesPredefined = ffc("config.namespaces.predefined", "A list of namespaces to ensure exists, without requiring a broadcast from the network", "List "+i18n.StringType) - ConfigNamespacesPredefinedName = ffc("config.namespaces.predefined[].name", "The name of the namespace (must be unique)", i18n.StringType) - ConfigNamespacesPredefinedDescription = ffc("config.namespaces.predefined[].description", "A description for the namespace", i18n.StringType) - ConfigNamespacesPredefinedPlugins = ffc("config.namespaces.predefined[].plugins", "The list of plugins for this namespace", i18n.StringType) - ConfigNamespacesPredefinedDefaultKey = ffc("config.namespaces.predefined[].defaultKey", "A default signing key for blockchain transactions within this namespace", i18n.StringType) - ConfigNamespacesPredefinedKeyNormalization = ffc("config.namespaces.predefined[].asset.manager.keyNormalization", "Mechanism to normalize keys before using them. 
Valid options are `blockchain_plugin` - use blockchain plugin (default) or `none` - do not attempt normalization", i18n.StringType) + ConfigNamespacesDefault = ffc("config.namespaces.default", "The default namespace - must be in the predefined list", i18n.StringType) + ConfigNamespacesPredefined = ffc("config.namespaces.predefined", "A list of namespaces to ensure exists, without requiring a broadcast from the network", "List "+i18n.StringType) + ConfigNamespacesPredefinedName = ffc("config.namespaces.predefined[].name", "The name of the namespace (must be unique)", i18n.StringType) + ConfigNamespacesPredefinedDescription = ffc("config.namespaces.predefined[].description", "A description for the namespace", i18n.StringType) + ConfigNamespacesPredefinedPlugins = ffc("config.namespaces.predefined[].plugins", "The list of plugins for this namespace", i18n.StringType) + ConfigNamespacesPredefinedDefaultKey = ffc("config.namespaces.predefined[].defaultKey", "A default signing key for blockchain transactions within this namespace", i18n.StringType) + ConfigNamespacesPredefinedKeyNormalization = ffc("config.namespaces.predefined[].asset.manager.keyNormalization", "Mechanism to normalize keys before using them. Valid options are `blockchain_plugin` - use blockchain plugin (default) or `none` - do not attempt normalization", i18n.StringType) + ConfigNamespacesPredefinedTLSConfigs = ffc("config.namespaces.predefined[].tlsConfigs", "Supply a set of tls certificates to be used by subscriptions for this namespace", "List "+i18n.StringType) + ConfigNamespacesPredefinedTLSConfigsName = ffc("config.namespaces.predefined[].tlsConfigs[].name", "Name of the TLS Config", i18n.StringType) + // ConfigNamespacesPredefinedTLSConfigsTLS = ffc("config.namespaces.predefined[].tlsConfigs[].tls", "Specify the path to a CA, Cert and Key for TLS communication", i18n.StringType) ConfigNamespacesMultipartyEnabled = ffc("config.namespaces.predefined[].multiparty.enabled", "Enables multi-party mode for this namespace (defaults to true if an org name or key is configured, either here or at the root level)", i18n.BooleanType) ConfigNamespacesMultipartyNetworkNamespace = ffc("config.namespaces.predefined[].multiparty.networknamespace", "The shared namespace name to be sent in multiparty messages, if it differs from the local namespace name", i18n.StringType) ConfigNamespacesMultipartyOrgName = ffc("config.namespaces.predefined[].multiparty.org.name", "A short name for the local root organization within this namespace", i18n.StringType) @@ -323,33 +370,37 @@ var ( ConfigPrivatemessagingBatchTimeout = ffc("config.privatemessaging.batch.timeout", "The timeout to wait for a batch to fill, before sending", i18n.TimeDurationType) ConfigSharedstorageType = ffc("config.sharedstorage.type", "The Shared Storage plugin to use", i18n.StringType) - ConfigSharedstorageIpfsAPIURL = ffc("config.sharedstorage.ipfs.api.url", "The URL for the IPFS API", "URL "+i18n.StringType) - ConfigSharedstorageIpfsAPIProxyURL = ffc("config.sharedstorage.ipfs.api.proxy.url", "Optional HTTP proxy server to use when connecting to the IPFS API", "URL "+i18n.StringType) - ConfigSharedstorageIpfsGatewayURL = ffc("config.sharedstorage.ipfs.gateway.url", "The URL for the IPFS Gateway", "URL "+i18n.StringType) - ConfigSharedstorageIpfsGatewayProxyURL = ffc("config.sharedstorage.ipfs.gateway.proxy.url", "Optional HTTP proxy server to use when connecting to the IPFS Gateway", "URL "+i18n.StringType) + ConfigSharedstorageIpfsAPIURL = 
ffc("config.sharedstorage.ipfs.api.url", "The URL for the IPFS API", urlStringType) + ConfigSharedstorageIpfsAPIProxyURL = ffc("config.sharedstorage.ipfs.api.proxy.url", "Optional HTTP proxy server to use when connecting to the IPFS API", urlStringType) + ConfigSharedstorageIpfsGatewayURL = ffc("config.sharedstorage.ipfs.gateway.url", "The URL for the IPFS Gateway", urlStringType) + ConfigSharedstorageIpfsGatewayProxyURL = ffc("config.sharedstorage.ipfs.gateway.proxy.url", "Optional HTTP proxy server to use when connecting to the IPFS Gateway", urlStringType) ConfigPluginSharedstorage = ffc("config.plugins.sharedstorage", "The list of configured Shared Storage plugins", i18n.StringType) ConfigPluginSharedstorageName = ffc("config.plugins.sharedstorage[].name", "The name of the Shared Storage plugin to use", i18n.StringType) ConfigPluginSharedstorageType = ffc("config.plugins.sharedstorage[].type", "The Shared Storage plugin to use", i18n.StringType) - ConfigPluginSharedstorageIpfsAPIURL = ffc("config.plugins.sharedstorage[].ipfs.api.url", "The URL for the IPFS API", "URL "+i18n.StringType) - ConfigPluginSharedstorageIpfsAPIProxyURL = ffc("config.plugins.sharedstorage[].ipfs.api.proxy.url", "Optional HTTP proxy server to use when connecting to the IPFS API", "URL "+i18n.StringType) - ConfigPluginSharedstorageIpfsGatewayURL = ffc("config.plugins.sharedstorage[].ipfs.gateway.url", "The URL for the IPFS Gateway", "URL "+i18n.StringType) - ConfigPluginSharedstorageIpfsGatewayProxyURL = ffc("config.plugins.sharedstorage[].ipfs.gateway.proxy.url", "Optional HTTP proxy server to use when connecting to the IPFS Gateway", "URL "+i18n.StringType) + ConfigPluginSharedstorageIpfsAPIURL = ffc("config.plugins.sharedstorage[].ipfs.api.url", "The URL for the IPFS API", urlStringType) + ConfigPluginSharedstorageIpfsAPIProxyURL = ffc("config.plugins.sharedstorage[].ipfs.api.proxy.url", "Optional HTTP proxy server to use when connecting to the IPFS API", urlStringType) + ConfigPluginSharedstorageIpfsGatewayURL = ffc("config.plugins.sharedstorage[].ipfs.gateway.url", "The URL for the IPFS Gateway", urlStringType) + ConfigPluginSharedstorageIpfsGatewayProxyURL = ffc("config.plugins.sharedstorage[].ipfs.gateway.proxy.url", "Optional HTTP proxy server to use when connecting to the IPFS Gateway", urlStringType) ConfigSubscriptionMax = ffc("config.subscription.max", "The maximum number of pre-defined subscriptions that can exist (note for high fan-out consider connecting a dedicated pub/sub broker to the dispatcher)", i18n.IntType) ConfigSubscriptionDefaultsBatchSize = ffc("config.subscription.defaults.batchSize", "Default read ahead to enable for subscriptions that do not explicitly configure readahead", i18n.IntType) ConfigTokensName = ffc("config.tokens[].name", "A name to identify this token plugin", i18n.StringType) ConfigTokensPlugin = ffc("config.tokens[].plugin", "The type of the token plugin to use", i18n.StringType) - ConfigTokensURL = ffc("config.tokens[].url", "The URL of the token connector", "URL "+i18n.StringType) - ConfigTokensProxyURL = ffc("config.tokens[].proxy.url", "Optional HTTP proxy server to use when connecting to the token connector", "URL "+i18n.StringType) - - ConfigPluginTokens = ffc("config.plugins.tokens", "The token plugin configurations", i18n.StringType) - ConfigPluginTokensName = ffc("config.plugins.tokens[].name", "A name to identify this token plugin", i18n.StringType) - ConfigPluginTokensBroadcastName = ffc("config.plugins.tokens[].broadcastName", "The name to be used in 
broadcast messages related to this token plugin, if it differs from the local plugin name", i18n.StringType) - ConfigPluginTokensType = ffc("config.plugins.tokens[].type", "The type of the token plugin to use", i18n.StringType) - ConfigPluginTokensURL = ffc("config.plugins.tokens[].fftokens.url", "The URL of the token connector", "URL "+i18n.StringType) - ConfigPluginTokensProxyURL = ffc("config.plugins.tokens[].fftokens.proxy.url", "Optional HTTP proxy server to use when connecting to the token connector", "URL "+i18n.StringType) + ConfigTokensURL = ffc("config.tokens[].url", "The URL of the token connector", urlStringType) + ConfigTokensProxyURL = ffc("config.tokens[].proxy.url", "Optional HTTP proxy server to use when connecting to the token connector", urlStringType) + + ConfigPluginTokens = ffc("config.plugins.tokens", "The token plugin configurations", i18n.StringType) + ConfigPluginTokensName = ffc("config.plugins.tokens[].name", "A name to identify this token plugin", i18n.StringType) + ConfigPluginTokensBroadcastName = ffc("config.plugins.tokens[].broadcastName", "The name to be used in broadcast messages related to this token plugin, if it differs from the local plugin name", i18n.StringType) + ConfigPluginTokensType = ffc("config.plugins.tokens[].type", "The type of the token plugin to use", i18n.StringType) + ConfigPluginTokensURL = ffc("config.plugins.tokens[].fftokens.url", "The URL of the token connector", urlStringType) + ConfigPluginTokensProxyURL = ffc("config.plugins.tokens[].fftokens.proxy.url", "Optional HTTP proxy server to use when connecting to the token connector", urlStringType) + ConfigPluginTokensBackgroundStart = ffc("config.plugins.tokens[].fftokens.backgroundStart.enabled", "Start the tokens plugin in the background and enter retry loop if failed to start", i18n.BooleanType) + ConfigPluginTokensBackgroundStartInitialDelay = ffc("config.plugins.tokens[].fftokens.backgroundStart.initialDelay", "Delay between restarts in the case where we retry to restart the token plugin", i18n.TimeDurationType) + ConfigPluginTokensBackgroundStartMaxDelay = ffc("config.plugins.tokens[].fftokens.backgroundStart.maxDelay", "Max delay between restarts in the case where we retry to restart the token plugin", i18n.TimeDurationType) + ConfigPluginTokensBackgroundStartFactor = ffc("config.plugins.tokens[].fftokens.backgroundStart.factor", "Set the factor by which the delay increases when retrying", i18n.FloatType) ConfigUIEnabled = ffc("config.ui.enabled", "Enables the web user interface", i18n.BooleanType) ConfigUIPath = ffc("config.ui.path", "The file system path which contains the static HTML, CSS, and JavaScript files for the user interface", i18n.StringType) diff --git a/internal/coremsgs/en_error_messages.go b/internal/coremsgs/en_error_messages.go index 15dd7d0c74..a2a82675a2 100644 --- a/internal/coremsgs/en_error_messages.go +++ b/internal/coremsgs/en_error_messages.go @@ -60,6 +60,7 @@ var ( MsgMissingPluginConfig = ffe("FF10138", "Missing configuration '%s' for %s") MsgMissingDataHashIndex = ffe("FF10139", "Missing data hash for index '%d' in message", 400) MsgInvalidEthAddress = ffe("FF10141", "Supplied ethereum address is invalid", 400) + MsgInvalidTezosAddress = ffe("FF10142", "Supplied tezos address is invalid", 400) Msg404NoResult = ffe("FF10143", "No result found", 404) MsgUnsupportedSQLOpInFilter = ffe("FF10150", "No SQL mapping implemented for filter operator '%s'", 400) MsgFilterSortDesc = ffe("FF10154", "Sort field. 
For multi-field sort use comma separated values (or multiple query values) with '-' prefix for descending") @@ -113,7 +114,7 @@ var ( MsgNodeAndOrgIDMustBeSet = ffe("FF10216", "node.name, org.name and org.key must be configured first", 409) MsgBlobStreamingFailed = ffe("FF10217", "Blob streaming terminated with error", 500) MsgNodeNotFound = ffe("FF10224", "Node with name or identity '%s' not found", 400) - MsgLocalNodeResolveFailed = ffe("FF10225", "Unable to find local node to add to group. Check the status API to confirm the node is registered", 500) + MsgLocalNodeNotSet = ffe("FF10225", "Unable to resolve the local node. Please ensure node.name is configured", 500) MsgGroupNotFound = ffe("FF10226", "Group '%s' not found", 404) MsgDXRESTErr = ffe("FF10229", "Error from data exchange: %s") MsgInvalidHex = ffe("FF10231", "Invalid hex supplied", 400) @@ -144,14 +145,15 @@ var ( MsgIdentityNotFoundByString = ffe("FF10277", "Identity could not be resolved via lookup string '%s'") MsgAuthorOrgSigningKeyMismatch = ffe("FF10279", "Author organization '%s' is not associated with signing key '%s'") MsgCannotTransferToSelf = ffe("FF10280", "From and to addresses must be different", 400) - MsgLocalOrgLookupFailed = ffe("FF10281", "Unable to resolve the local org '%s' by the configured signing key on the node. Please confirm the org is registered with key '%s'", 500) + MsgLocalOrgNotSet = ffe("FF10281", "Unable to resolve the local root org. Please ensure org.name is configured", 500) + MsgTezosconnectRESTErr = ffe("FF10283", "Error from tezos connector: %s") MsgFabconnectRESTErr = ffe("FF10284", "Error from fabconnect: %s") MsgInvalidIdentity = ffe("FF10285", "Supplied Fabric signer identity is invalid", 400) MsgFailedToDecodeCertificate = ffe("FF10286", "Failed to decode certificate: %s", 500) MsgInvalidMessageType = ffe("FF10287", "Invalid message type - allowed types are %s", 400) MsgWSClosed = ffe("FF10290", "Websocket closed") MsgFieldNotSpecified = ffe("FF10292", "Field '%s' must be specified", 400) - MsgTokenPoolNotConfirmed = ffe("FF10293", "Token pool is not yet confirmed") + MsgTokenPoolNotActive = ffe("FF10293", "Token pool is not yet activated") MsgHistogramCollectionParam = ffe("FF10297", "Collection to fetch") MsgInvalidNumberOfIntervals = ffe("FF10298", "Number of time intervals must be between %d and %d", 400) MsgInvalidChartNumberParam = ffe("FF10299", "Invalid %s. 
Must be a number.", 400) @@ -243,7 +245,7 @@ var ( MsgDefRejectedIDMismatch = ffe("FF10404", "Rejected %s '%s' - ID mismatch with existing record") MsgDefRejectedLocationMismatch = ffe("FF10405", "Rejected %s '%s' - location mismatch with existing record") MsgDefRejectedSchemaFail = ffe("FF10406", "Rejected %s '%s' - schema check: %s") - MsgDefRejectedConflict = ffe("FF10407", "Rejected %s '%s' - conflicts with existing: %s") + MsgDefRejectedConflict = ffe("FF10407", "Rejected %s '%s' - conflicts with existing: %s", 409) MsgDefRejectedIdentityNotFound = ffe("FF10408", "Rejected %s '%s' - identity not found: %s") MsgDefRejectedWrongAuthor = ffe("FF10409", "Rejected %s '%s' - wrong author: %s") MsgDefRejectedHashMismatch = ffe("FF10410", "Rejected %s '%s' - hash mismatch: %s != %s") @@ -283,4 +285,20 @@ var ( MsgOperationNotFoundInTransaction = ffe("FF10444", "No operation of type %s was found in transaction '%s'") MsgCannotSetParameterWithMessage = ffe("FF10445", "Cannot provide a value for '%s' when pinning a message", 400) MsgNamespaceNotStarted = ffe("FF10446", "Namespace '%s' is not started", 412) + MsgNameExists = ffe("FF10447", "Name already exists", 409) + MsgNetworkNameExists = ffe("FF10448", "Network name already exists", 409) + MsgCannotDeletePublished = ffe("FF10449", "Cannot delete an item that has been published", 409) + MsgAlreadyPublished = ffe("FF10450", "Item has already been published", 409) + MsgContractInterfaceNotPublished = ffe("FF10451", "Contract interface '%s' has not been published", 409) + MsgInvalidMessageSigner = ffe("FF10452", "Invalid message '%s'. Key '%s' does not match the signer of the pin: %s") + MsgInvalidMessageIdentity = ffe("FF10453", "Invalid message '%s'. Author '%s' does not match identity registered to %s: %s (%s)") + MsgDuplicateTLSConfig = ffe("FF10454", "Found duplicate TLS Config '%s'", 400) + MsgNotFoundTLSConfig = ffe("FF10455", "Provided TLS Config name '%s' not found for namespace '%s'", 400) + MsgSQLInsertManyOutsideTransaction = ffe("FF10456", "Attempt to perform insert many outside of a transaction", 500) + MsgUnexpectedInterfaceType = ffe("FF10457", "Unexpected interface type: %T", 500) + MsgBlockchainConnectorRESTErrConflict = ffe("FF10458", "Conflict from blockchain connector: %s", 409) + MsgTokensRESTErrConflict = ffe("FF10459", "Conflict from tokens service: %s", 409) + MsgBatchWithDataNotSupported = ffe("FF10460", "Provided subscription '%s' enables batching and withData which is not supported", 400) + MsgBatchDeliveryNotSupported = ffe("FF10461", "Batch delivery not supported by transport '%s'", 400) + MsgWSWrongNamespace = ffe("FF10462", "Websocket request received on a namespace scoped connection but the provided namespace does not match") ) diff --git a/internal/coremsgs/en_struct_descriptions.go b/internal/coremsgs/en_struct_descriptions.go index ab4bc5ca5b..8ecb94618b 100644 --- a/internal/coremsgs/en_struct_descriptions.go +++ b/internal/coremsgs/en_struct_descriptions.go @@ -67,6 +67,7 @@ var ( MessageBatchID = ffm("Message.batch", "The UUID of the batch in which the message was pinned/transferred") MessageState = ffm("Message.state", "The current state of the message") MessageConfirmed = ffm("Message.confirmed", "The timestamp of when the message was confirmed/rejected") + MessageRejectReason = ffm("Message.rejectReason", "If a message was rejected, provides details on the rejection reason") MessageData = ffm("Message.data", "The list of data elements attached to the message") MessagePins = ffm("Message.pins", "For 
private messages, a unique pin hash:nonce is assigned for each topic") MessageTransactionID = ffm("Message.txid", "The ID of the transaction used to order/deliver this message") @@ -222,13 +223,15 @@ var ( ChartHistogramTypeType = ffm("ChartHistogramType.type", "Name of the type") // ContractAPI field descriptions - ContractAPIID = ffm("ContractAPI.id", "The UUID of the contract API") - ContractAPINamespace = ffm("ContractAPI.namespace", "The namespace of the contract API") - ContractAPIInterface = ffm("ContractAPI.interface", "Reference to the FireFly Interface definition associated with the contract API") - ContractAPILocation = ffm("ContractAPI.location", "If this API is tied to an individual instance of a smart contract, this field can include a blockchain specific contract identifier. For example an Ethereum contract address, or a Fabric chaincode name and channel") - ContractAPIName = ffm("ContractAPI.name", "The name that is used in the URL to access the API") - ContractAPIMessage = ffm("ContractAPI.message", "The UUID of the broadcast message that was used to publish this API to the network") - ContractAPIURLs = ffm("ContractAPI.urls", "The URLs to use to access the API") + ContractAPIID = ffm("ContractAPI.id", "The UUID of the contract API") + ContractAPINamespace = ffm("ContractAPI.namespace", "The namespace of the contract API") + ContractAPIInterface = ffm("ContractAPI.interface", "Reference to the FireFly Interface definition associated with the contract API") + ContractAPILocation = ffm("ContractAPI.location", "If this API is tied to an individual instance of a smart contract, this field can include a blockchain specific contract identifier. For example an Ethereum contract address, or a Fabric chaincode name and channel") + ContractAPIName = ffm("ContractAPI.name", "The name that is used in the URL to access the API") + ContractAPINetworkName = ffm("ContractAPI.networkName", "The published name of the API within the multiparty network") + ContractAPIMessage = ffm("ContractAPI.message", "The UUID of the broadcast message that was used to publish this API to the network") + ContractAPIURLs = ffm("ContractAPI.urls", "The URLs to use to access the API") + ContractAPIPublished = ffm("ContractAPI.published", "Indicates if the API is published to other members of the multiparty network") // ContractURLs field descriptions ContractURLsOpenAPI = ffm("ContractURLs.openapi", "The URL to download the OpenAPI v3 (Swagger) description for the API generated in JSON or YAML format") @@ -244,11 +247,13 @@ var ( FFIMessage = ffm("FFI.message", "The UUID of the broadcast message that was used to publish this FFI to the network") FFINamespace = ffm("FFI.namespace", "The namespace of the FFI") FFIName = ffm("FFI.name", "The name of the FFI - usually matching the smart contract name") + FFINetworkName = ffm("FFI.networkName", "The published name of the FFI within the multiparty network") FFIDescription = ffm("FFI.description", "A description of the smart contract this FFI represents") FFIVersion = ffm("FFI.version", "A version for the FFI - use of semantic versioning such as 'v1.0.1' is encouraged") FFIMethods = ffm("FFI.methods", "An array of smart contract method definitions") FFIEvents = ffm("FFI.events", "An array of smart contract event definitions") FFIErrors = ffm("FFI.errors", "An array of smart contract error definitions") + FFIPublished = ffm("FFI.published", "Indicates if the FFI is published to other members of the multiparty network") // FFIMethod field descriptions FFIMethodID = 
ffm("FFIMethod.id", "The UUID of the FFI method definition") @@ -532,9 +537,11 @@ var ( SubscriptionBlockchainEventFilterListener = ffm("SubscriptionBlockchainEventFilter.listener", "Regular expression to apply to the blockchain event 'listener' field, which is the UUID of the event listener. So you can restrict your subscription to certain blockchain listeners. Alternatively to avoid your application need to know listener UUIDs you can set the 'topic' field of blockchain event listeners, and use a topic filter on your subscriptions") // SubscriptionCoreOptions field descriptions - SubscriptionCoreOptionsFirstEvent = ffm("SubscriptionCoreOptions.firstEvent", "Whether your application would like to receive events from the 'oldest' event emitted by your FireFly node (from the beginning of time), or the 'newest' event (from now), or a specific event sequence. Default is 'newest'") - SubscriptionCoreOptionsReadAhead = ffm("SubscriptionCoreOptions.readAhead", "The number of events to stream ahead to your application, while waiting for confirmation of consumption of those events. At least once delivery semantics are used in FireFly, so if your application crashes/reconnects this is the maximum number of events you would expect to be redelivered after it restarts") - SubscriptionCoreOptionsWithData = ffm("SubscriptionCoreOptions.withData", "Whether message events delivered over the subscription, should be packaged with the full data of those messages in-line as part of the event JSON payload. Or if the application should make separate REST calls to download that data. May not be supported on some transports.") + SubscriptionCoreOptionsFirstEvent = ffm("SubscriptionCoreOptions.firstEvent", "Whether your application would like to receive events from the 'oldest' event emitted by your FireFly node (from the beginning of time), or the 'newest' event (from now), or a specific event sequence. Default is 'newest'") + SubscriptionCoreOptionsReadAhead = ffm("SubscriptionCoreOptions.readAhead", "The number of events to stream ahead to your application, while waiting for confirmation of consumption of those events. At least once delivery semantics are used in FireFly, so if your application crashes/reconnects this is the maximum number of events you would expect to be redelivered after it restarts") + SubscriptionCoreOptionsWithData = ffm("SubscriptionCoreOptions.withData", "Whether message events delivered over the subscription, should be packaged with the full data of those messages in-line as part of the event JSON payload. Or if the application should make separate REST calls to download that data. May not be supported on some transports.") + SubscriptionCoreOptionsBatch = ffm("SubscriptionCoreOptions.batch", "Events are delivered in batches in an ordered array. The batch size is capped to the readAhead limit. The event payload is always an array even if there is a single event in the batch. 
Commonly used with Webhooks to allow events to be delivered and acknowledged in batches.") + SubscriptionCoreOptionsBatchTimeout = ffm("SubscriptionCoreOptions.batchTimeout", "When batching is enabled, the optional timeout to send events even when the batch hasn't filled.") // TokenApproval field descriptions TokenApprovalLocalID = ffm("TokenApproval.localId", "The UUID of this token approval, in the local FireFly node") @@ -578,14 +585,15 @@ var ( TokenPoolType = ffm("TokenPool.type", "The type of token the pool contains, such as fungible/non-fungible") TokenPoolNamespace = ffm("TokenPool.namespace", "The namespace for the token pool") TokenPoolName = ffm("TokenPool.name", "The name of the token pool. Note the name is not validated against the description of the token on the blockchain") + TokenPoolNetworkName = ffm("TokenPool.networkName", "The published name of the token pool within the multiparty network") TokenPoolStandard = ffm("TokenPool.standard", "The ERC standard the token pool conforms to, as reported by the token connector") TokenPoolLocator = ffm("TokenPool.locator", "A unique identifier for the pool, as provided by the token connector") TokenPoolKey = ffm("TokenPool.key", "The signing key used to create the token pool. On input for token connectors that support on-chain deployment of new tokens (vs. only index existing ones) this determines the signing key used to create the token on-chain") TokenPoolSymbol = ffm("TokenPool.symbol", "The token symbol. If supplied on input for an existing on-chain token, this must match the on-chain information") TokenPoolDecimals = ffm("TokenPool.decimals", "Number of decimal places that this token has") TokenPoolConnector = ffm("TokenPool.connector", "The name of the token connector, as specified in the FireFly core configuration file that is responsible for the token pool. Required on input when multiple token connectors are configured") - TokenPoolMessage = ffm("TokenPool.message", "The UUID of the broadcast message used to inform the network to index this pool") - TokenPoolState = ffm("TokenPool.state", "The current state of the token pool") + TokenPoolMessage = ffm("TokenPool.message", "The UUID of the broadcast message used to inform the network about this pool") + TokenPoolActive = ffm("TokenPool.active", "Indicates whether the pool has been successfully activated with the token connector") TokenPoolCreated = ffm("TokenPool.created", "The creation time of the pool") TokenPoolConfig = ffm("TokenPool.config", "Input only field, with token connector specific configuration of the pool, such as an existing Ethereum address and block number to used to index the pool. See your chosen token connector documentation for details") TokenPoolInfo = ffm("TokenPool.info", "Token connector specific information about the pool. 
See your chosen token connector documentation for details") @@ -593,6 +601,7 @@ var ( TokenPoolInterface = ffm("TokenPool.interface", "A reference to an existing FFI, containing pre-registered type information for the token contract") TokenPoolInterfaceFormat = ffm("TokenPool.interfaceFormat", "The interface encoding format supported by the connector for this token pool") TokenPoolMethods = ffm("TokenPool.methods", "The method definitions resolved by the token connector to be used by each token operation") + TokenPoolPublished = ffm("TokenPool.published", "Indicates if the token pool is published to other members of the multiparty network") // TokenPoolInput field descriptions TokenPoolInputIdempotencyKey = ffm("TokenPoolInput.idempotencyKey", "An optional identifier to allow idempotent submission of requests. Stored on the transaction uniquely within a namespace") @@ -653,6 +662,7 @@ var ( ContractCallRequestMethodPath = ffm("ContractCallRequest.methodPath", "The pathname of the method on the specified FFI") ContractCallRequestErrors = ffm("ContractCallRequest.errors", "An in-line FFI errors definition for the method to invoke. Alternative to specifying FFI") ContractCallRequestInput = ffm("ContractCallRequest.input", "A map of named inputs. The name and type of each input must be compatible with the FFI description of the method, so that FireFly knows how to serialize it to the blockchain via the connector") + ContractCallRequestOutput = ffm("ContractCallRequest.output", "A map of named outputs") ContractCallRequestOptions = ffm("ContractCallRequest.options", "A map of named inputs that will be passed through to the blockchain connector") ContractCallMessage = ffm("ContractCallRequest.message", "You can specify a message to correlate with the invocation, which can be of type broadcast or private. Your specified method must support on-chain/off-chain correlation by taking a data input on the call") ContractCallIdempotencyKey = ffm("ContractCallRequest.idempotencyKey", "An optional identifier to allow idempotent submission of requests. Stored on the transaction uniquely within a namespace") @@ -674,22 +684,39 @@ var ( WSSubscriptionStatusFilter = ffm("WSSubscriptionStatus.filter", "The subscription filter specification") WSSubscriptionStatusStartTime = ffm("WSSubscriptionStatus.startTime", "The time the subscription started (reset on dynamic namespace reload)") - WebhooksOptJSON = ffm("WebhookSubOptions.json", "Webhooks only: Whether to assume the response body is JSON, regardless of the returned Content-Type") - WebhooksOptReply = ffm("WebhookSubOptions.reply", "Webhooks only: Whether to automatically send a reply event, using the body returned by the webhook") - WebhooksOptHeaders = ffm("WebhookSubOptions.headers", "Webhooks only: Static headers to set on the webhook request") - WebhooksOptQuery = ffm("WebhookSubOptions.query", "Webhooks only: Static query params to set on the webhook request") - WebhooksOptInput = ffm("WebhookSubOptions.input", "Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. Only applies if withData=true") - WebhooksOptFastAck = ffm("WebhookSubOptions.fastack", "Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations") - WebhooksOptURL = ffm("WebhookSubOptions.url", "Webhooks only: HTTP url to invoke. 
Can be relative if a base URL is set in the webhook plugin config") - WebhooksOptMethod = ffm("WebhookSubOptions.method", "Webhooks only: HTTP method to invoke. Default=POST") - WebhooksOptReplyTag = ffm("WebhookSubOptions.replytag", "Webhooks only: The tag to set on the reply message") - WebhooksOptReplyTx = ffm("WebhookSubOptions.replytx", "Webhooks only: The transaction type to set on the reply message") - WebhooksOptInputQuery = ffm("WebhookInputOptions.query", "A top-level property of the first data input, to use for query parameters") - WebhooksOptInputHeaders = ffm("WebhookInputOptions.headers", "A top-level property of the first data input, to use for headers") - WebhooksOptInputBody = ffm("WebhookInputOptions.body", "A top-level property of the first data input, to use for the request body. Default is the whole first body") - WebhooksOptInputPath = ffm("WebhookInputOptions.path", "A top-level property of the first data input, to use for a path to append with escaping to the webhook path") - WebhooksOptInputReplyTx = ffm("WebhookInputOptions.replytx", "A top-level property of the first data input, to use to dynamically set whether to pin the response (so the requester can choose)") + WebhooksOptJSON = ffm("WebhookSubOptions.json", "Webhooks only: Whether to assume the response body is JSON, regardless of the returned Content-Type") + WebhooksOptReply = ffm("WebhookSubOptions.reply", "Webhooks only: Whether to automatically send a reply event, using the body returned by the webhook") + WebhooksOptHeaders = ffm("WebhookSubOptions.headers", "Webhooks only: Static headers to set on the webhook request") + WebhooksOptQuery = ffm("WebhookSubOptions.query", "Webhooks only: Static query params to set on the webhook request") + WebhooksOptInput = ffm("WebhookSubOptions.input", "Webhooks only: A set of options to extract data from the first JSON input data in the incoming message. Only applies if withData=true") + WebhooksOptFastAck = ffm("WebhookSubOptions.fastack", "Webhooks only: When true the event will be acknowledged before the webhook is invoked, allowing parallel invocations") + WebhooksOptURL = ffm("WebhookSubOptions.url", "Webhooks only: HTTP url to invoke. Can be relative if a base URL is set in the webhook plugin config") + WebhooksOptMethod = ffm("WebhookSubOptions.method", "Webhooks only: HTTP method to invoke. Default=POST") + WebhooksOptReplyTag = ffm("WebhookSubOptions.replytag", "Webhooks only: The tag to set on the reply message") + WebhooksOptReplyTx = ffm("WebhookSubOptions.replytx", "Webhooks only: The transaction type to set on the reply message") + WebhooksOptTLSConfigName = ffm("WebhookSubOptions.tlsConfigName", "The name of an existing TLS configuration associated to the namespace to use") + WebhooksOptHTTPOptions = ffm("WebhookSubOptions.httpOptions", "Webhooks only: a set of options for HTTP") + WebhooksOptHTTPRetry = ffm("WebhookSubOptions.retry", "Webhooks only: a set of options for retrying the webhook call") + WebhooksOptInputQuery = ffm("WebhookInputOptions.query", "A top-level property of the first data input, to use for query parameters") + WebhooksOptInputHeaders = ffm("WebhookInputOptions.headers", "A top-level property of the first data input, to use for headers") + WebhooksOptInputBody = ffm("WebhookInputOptions.body", "A top-level property of the first data input, to use for the request body. 
Default is the whole first body") + WebhooksOptInputPath = ffm("WebhookInputOptions.path", "A top-level property of the first data input, to use for a path to append with escaping to the webhook path") + WebhooksOptInputReplyTx = ffm("WebhookInputOptions.replytx", "A top-level property of the first data input, to use to dynamically set whether to pin the response (so the requester can choose)") + WebhooksOptRetryEnabled = ffm("WebhookRetryOptions.enabled", "Enables retry on HTTP calls, defaults to false") + WebhooksOptRetryCount = ffm("WebhookRetryOptions.count", "Number of times to retry the webhook call in case of failure") + WebhooksOptRetryInitialDelay = ffm("WebhookRetryOptions.initialDelay", "Initial delay between retries when we retry the webhook call") + WebhooksOptRetryMaxDelay = ffm("WebhookRetryOptions.maxDelay", "Max delay between retries when we retry the webhook call") + WebhookOptHTTPExpectContinueTimeout = ffm("WebhookHTTPOptions.expectContinueTimeout", "See [ExpectContinueTimeout in the Go docs](https://pkg.go.dev/net/http#Transport)") + WebhookOptHTTPIdleTimeout = ffm("WebhookHTTPOptions.idleTimeout", "The max duration to hold a HTTP keepalive connection between calls") + WebhookOptHTTPMaxIdleConns = ffm("WebhookHTTPOptions.maxIdleConns", "The max number of idle connections to hold pooled") + WebhookOptHTTPConnectionTimeout = ffm("WebhookHTTPOptions.connectionTimeout", "The maximum amount of time that a connection is allowed to remain with no data transmitted.") + WebhookOptHTTPTLSHandshakeTimeout = ffm("WebhookHTTPOptions.tlsHandshakeTimeout", "The max duration to hold a TLS handshake alive") + WebhookOptHTTPRequestTimeout = ffm("WebhookHTTPOptions.requestTimeout", "The max duration to wait for an HTTP request to complete") + WebhookOptHTTPProxyURL = ffm("WebhookHTTPOptions.proxyURL", "HTTP proxy URL to use for outbound requests to the webhook") // PublishInput field descriptions PublishInputIdempotencyKey = ffm("PublishInput.idempotencyKey", "An optional identifier to allow idempotent submission of requests. 
Stored on the transaction uniquely within a namespace") + + // DefinitionPublish field descriptions + DefinitionPublishNetworkName = ffm("DefinitionPublish.networkName", "An optional name to be used for publishing this definition to the multiparty network, which may differ from the local name") ) diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index f60d07133b..d807698fda 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -42,7 +42,7 @@ type Manager interface { PeekMessageCache(ctx context.Context, id *fftypes.UUID, options ...CacheReadOption) (msg *core.Message, data core.DataArray) UpdateMessageCache(msg *core.Message, data core.DataArray) UpdateMessageIfCached(ctx context.Context, msg *core.Message) - UpdateMessageStateIfCached(ctx context.Context, id *fftypes.UUID, state core.MessageState, confirmed *fftypes.FFTime) + UpdateMessageStateIfCached(ctx context.Context, id *fftypes.UUID, state core.MessageState, confirmed *fftypes.FFTime, rejectReason string) ResolveInlineData(ctx context.Context, msg *NewMessage) error WriteNewMessage(ctx context.Context, newMsg *NewMessage) error BlobsEnabled() bool @@ -287,11 +287,12 @@ func (dm *dataManager) UpdateMessageIfCached(ctx context.Context, msg *core.Mess } } -func (dm *dataManager) UpdateMessageStateIfCached(ctx context.Context, id *fftypes.UUID, state core.MessageState, confirmed *fftypes.FFTime) { +func (dm *dataManager) UpdateMessageStateIfCached(ctx context.Context, id *fftypes.UUID, state core.MessageState, confirmed *fftypes.FFTime, rejectReason string) { mce := dm.queryMessageCache(ctx, id) if mce != nil { mce.msg.State = state mce.msg.Confirmed = confirmed + mce.msg.RejectReason = rejectReason } } diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index c8bebc9d9b..a8a3e9afd2 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -1113,7 +1113,7 @@ func TestUpdateMessageCacheCRORequirePins(t *testing.T) { } now := fftypes.Now() - dm.UpdateMessageStateIfCached(ctx, msgWithPins.Header.ID, core.MessageStateConfirmed, now) + dm.UpdateMessageStateIfCached(ctx, msgWithPins.Header.ID, core.MessageStateConfirmed, now, "") assert.Equal(t, core.MessageStateConfirmed, msgWithPins.State) assert.Equal(t, now, msgWithPins.Confirmed) diff --git a/internal/data/message_writer.go b/internal/data/message_writer.go index 62d1839102..84f7079085 100644 --- a/internal/data/message_writer.go +++ b/internal/data/message_writer.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // diff --git a/internal/database/postgres/postgres.go b/internal/database/postgres/postgres.go index 812cefef2b..c4e573c7a0 100644 --- a/internal/database/postgres/postgres.go +++ b/internal/database/postgres/postgres.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -41,6 +41,9 @@ type Postgres struct { func (psql *Postgres) Init(ctx context.Context, config config.Section) error { capabilities := &database.Capabilities{} + if config.GetInt(dbsql.SQLConfMaxConnections) > 1 { + capabilities.Concurrency = true + } return psql.SQLCommon.Init(ctx, psql, config, capabilities) } diff --git a/internal/database/sqlcommon/batch_sql.go b/internal/database/sqlcommon/batch_sql.go index 007209e409..74a128a9df 100644 --- a/internal/database/sqlcommon/batch_sql.go +++ b/internal/database/sqlcommon/batch_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -172,7 +172,7 @@ func (s *SQLCommon) GetBatches(ctx context.Context, namespace string, filter ffa batches = append(batches, batch) } - return batches, s.QueryRes(ctx, batchesTable, tx, fop, fi), err + return batches, s.QueryRes(ctx, batchesTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/blob_sql.go b/internal/database/sqlcommon/blob_sql.go index f702eb7c5b..6bbc34b17d 100644 --- a/internal/database/sqlcommon/blob_sql.go +++ b/internal/database/sqlcommon/blob_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -162,7 +162,7 @@ func (s *SQLCommon) GetBlobs(ctx context.Context, namespace string, filter ffapi blob = append(blob, d) } - return blob, s.QueryRes(ctx, blobsTable, tx, fop, fi), err + return blob, s.QueryRes(ctx, blobsTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/blockchainevents_sql.go b/internal/database/sqlcommon/blockchainevents_sql.go index 80dbe12ee4..f6c13c06b8 100644 --- a/internal/database/sqlcommon/blockchainevents_sql.go +++ b/internal/database/sqlcommon/blockchainevents_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -83,6 +83,47 @@ func (s *SQLCommon) attemptBlockchainEventInsert(ctx context.Context, tx *dbsql. return err } +func (s *SQLCommon) InsertBlockchainEvents(ctx context.Context, events []*core.BlockchainEvent, hooks ...database.PostCompletionHook) (err error) { + + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) + if err != nil { + return err + } + defer s.RollbackTx(ctx, tx, autoCommit) + if s.Features().MultiRowInsert { + query := sq.Insert(blockchaineventsTable).Columns(blockchainEventColumns...) 
+ for _, event := range events { + query = s.setBlockchainEventInsertValues(query, event) + } + sequences := make([]int64, len(events)) + + // Use a single multi-row insert for the messages + err := s.InsertTxRows(ctx, blockchaineventsTable, tx, query, func() { + for _, event := range events { + s.callbacks.UUIDCollectionNSEvent(database.CollectionBlockchainEvents, core.ChangeEventTypeCreated, event.Namespace, event.ID) + } + }, sequences, true /* we want the caller to be able to retry with individual upserts */) + if err != nil { + return err + } + } else { + // Fall back to individual inserts grouped in a TX + for _, event := range events { + err := s.attemptBlockchainEventInsert(ctx, tx, event, false) + if err != nil { + return err + } + } + } + + for _, hook := range hooks { + tx.AddPostCommitHook(hook) + } + + return s.CommitTx(ctx, tx, autoCommit) + +} + func (s *SQLCommon) InsertOrGetBlockchainEvent(ctx context.Context, event *core.BlockchainEvent) (existing *core.BlockchainEvent, err error) { ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) if err != nil { @@ -187,5 +228,5 @@ func (s *SQLCommon) GetBlockchainEvents(ctx context.Context, namespace string, f events = append(events, event) } - return events, s.QueryRes(ctx, blockchaineventsTable, tx, fop, fi), err + return events, s.QueryRes(ctx, blockchaineventsTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/blockchainevents_sql_test.go b/internal/database/sqlcommon/blockchainevents_sql_test.go index 796cd871bb..89f7b2dd85 100644 --- a/internal/database/sqlcommon/blockchainevents_sql_test.go +++ b/internal/database/sqlcommon/blockchainevents_sql_test.go @@ -115,6 +115,7 @@ func TestBlockchainEventsE2EWithDB(t *testing.T) { existing, err = s.InsertOrGetBlockchainEvent(ctx, event4) assert.NoError(t, err) assert.Equal(t, event3.ID, existing.ID) + } func TestInsertBlockchainEventFailBegin(t *testing.T) { @@ -125,6 +126,67 @@ func TestInsertBlockchainEventFailBegin(t *testing.T) { assert.NoError(t, mock.ExpectationsWereMet()) } +func TestInsertBlockchainEventsBeginFail(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + err := s.InsertBlockchainEvents(context.Background(), []*core.BlockchainEvent{}) + assert.Regexp(t, "FF00175", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertBlockchainEventsMultiRowOK(t *testing.T) { + s := newMockProvider() + s.multiRowInsert = true + s.fakePSQLInsert = true + s, mock := s.init() + + be1 := &core.BlockchainEvent{ID: fftypes.NewUUID(), Namespace: "ns1"} + be2 := &core.BlockchainEvent{ID: fftypes.NewUUID(), Namespace: "ns1"} + s.callbacks.On("UUIDCollectionNSEvent", database.CollectionBlockchainEvents, core.ChangeEventTypeCreated, "ns1", be1.ID) + s.callbacks.On("UUIDCollectionNSEvent", database.CollectionBlockchainEvents, core.ChangeEventTypeCreated, "ns1", be2.ID) + + mock.ExpectBegin() + mock.ExpectQuery("INSERT.*blockchainevents").WillReturnRows(sqlmock.NewRows([]string{s.SequenceColumn()}). + AddRow(int64(1001)). 
+ AddRow(int64(1002)), + ) + mock.ExpectCommit() + hookCalled := make(chan struct{}, 1) + err := s.InsertBlockchainEvents(context.Background(), []*core.BlockchainEvent{be1, be2}, func() { + close(hookCalled) + }) + <-hookCalled + assert.NoError(t, err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertBlockchainEventsMultiRowFail(t *testing.T) { + s := newMockProvider() + s.multiRowInsert = true + s.fakePSQLInsert = true + s, mock := s.init() + be1 := &core.BlockchainEvent{ID: fftypes.NewUUID(), Namespace: "ns1"} + mock.ExpectBegin() + mock.ExpectQuery("INSERT.*").WillReturnError(fmt.Errorf("pop")) + err := s.InsertBlockchainEvents(context.Background(), []*core.BlockchainEvent{be1}) + assert.Regexp(t, "FF00177", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertBlockchainEventsSingleRowFail(t *testing.T) { + s, mock := newMockProvider().init() + be1 := &core.BlockchainEvent{ID: fftypes.NewUUID(), Namespace: "ns1"} + mock.ExpectBegin() + mock.ExpectExec("INSERT.*").WillReturnError(fmt.Errorf("pop")) + err := s.InsertBlockchainEvents(context.Background(), []*core.BlockchainEvent{be1}) + assert.Regexp(t, "FF00177", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + func TestInsertBlockchainEventFailInsert(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() @@ -157,9 +219,9 @@ func TestGetBlockchainEventByIDSelectFail(t *testing.T) { func TestGetBlockchainEventByIDNotFound(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{})) - msg, err := s.GetBlockchainEventByID(context.Background(), "ns", fftypes.NewUUID()) + be, err := s.GetBlockchainEventByID(context.Background(), "ns", fftypes.NewUUID()) assert.NoError(t, err) - assert.Nil(t, msg) + assert.Nil(t, be) assert.NoError(t, mock.ExpectationsWereMet()) } diff --git a/internal/database/sqlcommon/contractapis_sql.go b/internal/database/sqlcommon/contractapis_sql.go index 740bf01c4c..4d31b578bd 100644 --- a/internal/database/sqlcommon/contractapis_sql.go +++ b/internal/database/sqlcommon/contractapis_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -21,6 +21,7 @@ import ( "database/sql" sq "github.com/Masterminds/squirrel" + "github.com/hyperledger/firefly-common/pkg/dbsql" "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" @@ -36,85 +37,150 @@ var ( "interface_id", "location", "name", + "network_name", "namespace", "message_id", + "published", } contractAPIsFilterFieldMap = map[string]string{ - "interface": "interface_id", - "message": "message_id", + "interface": "interface_id", + "message": "message_id", + "networkname": "network_name", } ) const contractapisTable = "contractapis" -func (s *SQLCommon) UpsertContractAPI(ctx context.Context, api *core.ContractAPI) (err error) { +func (s *SQLCommon) attemptContractAPIUpdate(ctx context.Context, tx *dbsql.TXWrapper, api *core.ContractAPI) (int64, error) { + var networkName *string + if api.NetworkName != "" { + networkName = &api.NetworkName + } + var ifaceID *fftypes.UUID + if api.Interface != nil { + ifaceID = api.Interface.ID + } + return s.UpdateTx(ctx, contractapisTable, tx, + sq.Update(contractapisTable). + Set("interface_id", ifaceID). 
+ Set("location", api.Location). + Set("name", api.Name). + Set("network_name", networkName). + Set("message_id", api.Message). + Set("published", api.Published). + Where(sq.Eq{"id": api.ID}), + func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionContractAPIs, core.ChangeEventTypeUpdated, api.Namespace, api.ID) + }, + ) +} + +func (s *SQLCommon) setContractAPIInsertValues(query sq.InsertBuilder, api *core.ContractAPI) sq.InsertBuilder { + var networkName *string + if api.NetworkName != "" { + networkName = &api.NetworkName + } + var ifaceID *fftypes.UUID + if api.Interface != nil { + ifaceID = api.Interface.ID + } + return query.Values( + api.ID, + ifaceID, + api.Location, + api.Name, + networkName, + api.Namespace, + api.Message, + api.Published, + ) +} + +func (s *SQLCommon) attemptContractAPIInsert(ctx context.Context, tx *dbsql.TXWrapper, api *core.ContractAPI, requestConflictEmptyResult bool) error { + _, err := s.InsertTxExt(ctx, contractapisTable, tx, + s.setContractAPIInsertValues(sq.Insert(contractapisTable).Columns(contractAPIsColumns...), api), + func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionContractAPIs, core.ChangeEventTypeCreated, api.Namespace, api.ID) + }, requestConflictEmptyResult) + return err +} + +func (s *SQLCommon) contractAPIExists(ctx context.Context, tx *dbsql.TXWrapper, api *core.ContractAPI) (bool, error) { + rows, _, err := s.QueryTx(ctx, contractapisTable, tx, + sq.Select("id").From(contractapisTable).Where(sq.And{ + sq.Eq{ + "namespace": api.Namespace, + }, + sq.Or{ + sq.Eq{"name": api.Name}, + sq.Eq{"network_name": api.NetworkName}, + }, + }), + ) + if err != nil { + return false, err + } + defer rows.Close() + return rows.Next(), nil +} + +func (s *SQLCommon) InsertOrGetContractAPI(ctx context.Context, api *core.ContractAPI) (*core.ContractAPI, error) { ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) if err != nil { - return err + return nil, err } defer s.RollbackTx(ctx, tx, autoCommit) - rows, _, err := s.QueryTx(ctx, contractapisTable, tx, - sq.Select("id"). - From(contractapisTable). 
- Where(sq.And{ - sq.Eq{"namespace": api.Namespace}, - sq.Or{ - sq.Eq{"id": api.ID}, - sq.Eq{"name": api.Name}, - }, - }), - ) + insertErr := s.attemptContractAPIInsert(ctx, tx, api, true /* we want a failure here we can progress past */) + if insertErr == nil { + return nil, s.CommitTx(ctx, tx, autoCommit) + } + log.L(ctx).Debugf("Contract API insert failed due to err: %+v, retrieving the existing contract API", insertErr) + + // Do a select within the transaction to determine if the API already exists + existing, queryErr := s.getContractAPIPred(ctx, api.Namespace+":"+api.Name, sq.And{ + sq.Eq{"namespace": api.Namespace}, + sq.Or{ + sq.Eq{"id": api.ID}, + sq.Eq{"name": api.Name}, + sq.Eq{"network_name": api.NetworkName}, + }, + }) + if queryErr != nil || existing != nil { + return existing, queryErr + } + + // Error was apparently not an index conflict - must have been something else + return nil, insertErr +} + +func (s *SQLCommon) UpsertContractAPI(ctx context.Context, api *core.ContractAPI, optimization database.UpsertOptimization) (err error) { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) if err != nil { return err } + defer s.RollbackTx(ctx, tx, autoCommit) - existing := false - for rows.Next() { - existing = true - var id fftypes.UUID - _ = rows.Scan(&id) - if api.ID != nil && *api.ID != id { - rows.Close() - return database.IDMismatch - } - api.ID = &id // Update on returned object - } - rows.Close() - - if existing { - if _, err = s.UpdateTx(ctx, contractapisTable, tx, - sq.Update(contractapisTable). - Set("interface_id", api.Interface.ID). - Set("location", api.Location). - Set("name", api.Name). - Set("message_id", api.Message). - Where(sq.Eq{ - "namespace": api.Namespace, - "id": api.ID, - }), - func() { - s.callbacks.UUIDCollectionNSEvent(database.CollectionContractAPIs, core.ChangeEventTypeUpdated, api.Namespace, api.ID) - }, - ); err != nil { + optimized := false + if optimization == database.UpsertOptimizationNew { + opErr := s.attemptContractAPIInsert(ctx, tx, api, true /* we want a failure here we can progress past */) + optimized = opErr == nil + } else if optimization == database.UpsertOptimizationExisting { + rowsAffected, opErr := s.attemptContractAPIUpdate(ctx, tx, api) + optimized = opErr == nil && rowsAffected == 1 + } + + if !optimized { + // Do a select within the transaction to determine if the API already exists + exists, err := s.contractAPIExists(ctx, tx, api) + if err != nil { return err + } else if exists { + if _, err := s.attemptContractAPIUpdate(ctx, tx, api); err != nil { + return err + } } - } else { - if _, err = s.InsertTx(ctx, contractapisTable, tx, - sq.Insert(contractapisTable). - Columns(contractAPIsColumns...). 
- Values( - api.ID, - api.Interface.ID, - api.Location, - api.Name, - api.Namespace, - api.Message, - ), - func() { - s.callbacks.UUIDCollectionNSEvent(database.CollectionContractAPIs, core.ChangeEventTypeCreated, api.Namespace, api.ID) - }, - ); err != nil { + if err := s.attemptContractAPIInsert(ctx, tx, api, false); err != nil { return err } } @@ -126,14 +192,20 @@ func (s *SQLCommon) contractAPIResult(ctx context.Context, row *sql.Rows) (*core api := core.ContractAPI{ Interface: &fftypes.FFIReference{}, } + var networkName *string err := row.Scan( &api.ID, &api.Interface.ID, &api.Location, &api.Name, + &networkName, &api.Namespace, &api.Message, + &api.Published, ) + if networkName != nil { + api.NetworkName = *networkName + } if err != nil { return nil, i18n.WrapError(ctx, err, coremsgs.MsgDBReadErr, "contract") } @@ -187,7 +259,7 @@ func (s *SQLCommon) GetContractAPIs(ctx context.Context, namespace string, filte apis = append(apis, api) } - return apis, s.QueryRes(ctx, contractapisTable, tx, fop, fi), err + return apis, s.QueryRes(ctx, contractapisTable, tx, fop, nil, fi), err } @@ -198,3 +270,26 @@ func (s *SQLCommon) GetContractAPIByID(ctx context.Context, namespace string, id func (s *SQLCommon) GetContractAPIByName(ctx context.Context, namespace, name string) (*core.ContractAPI, error) { return s.getContractAPIPred(ctx, namespace+":"+name, sq.Eq{"namespace": namespace, "name": name}) } + +func (s *SQLCommon) GetContractAPIByNetworkName(ctx context.Context, namespace, networkName string) (*core.ContractAPI, error) { + return s.getContractAPIPred(ctx, namespace+":"+networkName, sq.Eq{"namespace": namespace, "network_name": networkName}) +} + +func (s *SQLCommon) DeleteContractAPI(ctx context.Context, namespace string, id *fftypes.UUID) error { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) + if err != nil { + return err + } + defer s.RollbackTx(ctx, tx, autoCommit) + + err = s.DeleteTx(ctx, contractapisTable, tx, sq.Delete(contractapisTable).Where(sq.Eq{ + "id": id, "namespace": namespace, + }), func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionContractAPIs, core.ChangeEventTypeDeleted, namespace, id) + }) + if err != nil { + return err + } + + return s.CommitTx(ctx, tx, autoCommit) +} diff --git a/internal/database/sqlcommon/contractapis_sql_test.go b/internal/database/sqlcommon/contractapis_sql_test.go index 7162b52a22..375768c332 100644 --- a/internal/database/sqlcommon/contractapis_sql_test.go +++ b/internal/database/sqlcommon/contractapis_sql_test.go @@ -24,7 +24,6 @@ import ( "github.com/DATA-DOG/go-sqlmock" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/log" - "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" "github.com/stretchr/testify/assert" @@ -43,9 +42,10 @@ func TestContractAPIE2EWithDB(t *testing.T) { interfaceID := fftypes.NewUUID() contractAPI := &core.ContractAPI{ - ID: apiID, - Namespace: "ns1", - Name: "banana", + ID: apiID, + Namespace: "ns1", + Name: "banana", + NetworkName: "banana-net", Interface: &fftypes.FFIReference{ ID: interfaceID, Name: "banana", @@ -56,9 +56,11 @@ func TestContractAPIE2EWithDB(t *testing.T) { s.callbacks.On("UUIDCollectionNSEvent", database.CollectionContractAPIs, core.ChangeEventTypeCreated, "ns1", apiID, mock.Anything).Return() s.callbacks.On("UUIDCollectionNSEvent", database.CollectionContractAPIs, core.ChangeEventTypeUpdated, "ns1", apiID, mock.Anything).Return() + 
s.callbacks.On("UUIDCollectionNSEvent", database.CollectionContractAPIs, core.ChangeEventTypeDeleted, "ns1", apiID, mock.Anything).Return() - err := s.UpsertContractAPI(ctx, contractAPI) + existing, err := s.InsertOrGetContractAPI(ctx, contractAPI) assert.NoError(t, err) + assert.Nil(t, existing) // Check we get the exact same ContractAPI back dataRead, err := s.GetContractAPIByID(ctx, "ns1", apiID) @@ -68,7 +70,7 @@ func TestContractAPIE2EWithDB(t *testing.T) { contractAPI.Interface.Version = "v1.1.0" - err = s.UpsertContractAPI(ctx, contractAPI) + err = s.UpsertContractAPI(ctx, contractAPI, database.UpsertOptimizationExisting) assert.NoError(t, err) // Check we get the exact same ContractAPI back @@ -76,12 +78,70 @@ func TestContractAPIE2EWithDB(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, dataRead) assert.Equal(t, *apiID, *dataRead.ID) + + dataRead, err = s.GetContractAPIByName(ctx, "ns1", "banana") + assert.NoError(t, err) + assert.NotNil(t, dataRead) + assert.Equal(t, *apiID, *dataRead.ID) + + dataRead, err = s.GetContractAPIByNetworkName(ctx, "ns1", "banana-net") + assert.NoError(t, err) + assert.NotNil(t, dataRead) + assert.Equal(t, *apiID, *dataRead.ID) + + // Cannot insert again with same name or network name + existing, err = s.InsertOrGetContractAPI(ctx, &core.ContractAPI{ + ID: fftypes.NewUUID(), + Name: "banana", + Namespace: "ns1", + Interface: &fftypes.FFIReference{ + ID: interfaceID, + }, + }) + assert.NoError(t, err) + assert.Equal(t, contractAPI.ID, existing.ID) + existing, err = s.InsertOrGetContractAPI(ctx, &core.ContractAPI{ + ID: fftypes.NewUUID(), + NetworkName: "banana-net", + Namespace: "ns1", + Interface: &fftypes.FFIReference{ + ID: interfaceID, + }, + }) + assert.NoError(t, err) + assert.Equal(t, contractAPI.ID, existing.ID) + + // Delete the API + err = s.DeleteContractAPI(ctx, "ns1", contractAPI.ID) + assert.NoError(t, err) +} + +func TestContractAPIInsertOrGetFailBeginTransaction(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + _, err := s.InsertOrGetContractAPI(context.Background(), &core.ContractAPI{}) + assert.Regexp(t, "FF00175", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestContractAPIInsertOrGetFailInsert(t *testing.T) { + rows := sqlmock.NewRows([]string{"id", "interface_id", "ledger", "location", "name", "network_name", "namespace", "message_id", "published"}) + s, mock := newMockProvider().init() + mock.ExpectBegin() + mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) + mock.ExpectQuery("SELECT .*").WillReturnRows(rows) + api := &core.ContractAPI{ + Interface: &fftypes.FFIReference{}, + } + _, err := s.InsertOrGetContractAPI(context.Background(), api) + assert.Regexp(t, "FF00177", err) + assert.NoError(t, mock.ExpectationsWereMet()) } func TestContractAPIDBFailBeginTransaction(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) - err := s.UpsertContractAPI(context.Background(), &core.ContractAPI{}) + err := s.UpsertContractAPI(context.Background(), &core.ContractAPI{}, database.UpsertOptimizationNew) assert.Regexp(t, "FF00175", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -90,21 +150,21 @@ func TestContractAPIDBFailSelect(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) - err := s.UpsertContractAPI(context.Background(), &core.ContractAPI{}) + err := s.UpsertContractAPI(context.Background(), 
&core.ContractAPI{}, database.UpsertOptimizationNew) assert.Regexp(t, "pop", err) assert.NoError(t, mock.ExpectationsWereMet()) } func TestContractAPIDBFailInsert(t *testing.T) { - rows := sqlmock.NewRows([]string{"id", "interface_id", "ledger", "location", "name", "namespace", "message_id"}) + rows := sqlmock.NewRows([]string{"id", "interface_id", "ledger", "location", "name", "network_name", "namespace", "message_id", "published"}) s, mock := newMockProvider().init() mock.ExpectBegin() + mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectQuery("SELECT .*").WillReturnRows(rows) - // mock.ExpectQuery("INSERT .*").WillReturnError(fmt.Errorf("pop")) api := &core.ContractAPI{ Interface: &fftypes.FFIReference{}, } - err := s.UpsertContractAPI(context.Background(), api) + err := s.UpsertContractAPI(context.Background(), api, database.UpsertOptimizationNew) assert.Regexp(t, "FF00177", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -119,28 +179,10 @@ func TestContractAPIDBFailUpdate(t *testing.T) { api := &core.ContractAPI{ Interface: &fftypes.FFIReference{}, } - err := s.UpsertContractAPI(context.Background(), api) + err := s.UpsertContractAPI(context.Background(), api, database.UpsertOptimizationNew) assert.Regexp(t, "pop", err) } -func TestUpsertContractAPIIDMismatch(t *testing.T) { - s, db := newMockProvider().init() - callbacks := &databasemocks.Callbacks{} - s.SetHandler("ns1", callbacks) - apiID := fftypes.NewUUID() - api := &core.ContractAPI{ - ID: apiID, - Namespace: "ns1", - } - - db.ExpectBegin() - db.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow("1")) - db.ExpectRollback() - err := s.UpsertContractAPI(context.Background(), api) - assert.Equal(t, database.IDMismatch, err) - assert.NoError(t, db.ExpectationsWereMet()) -} - func TestContractAPIDBFailScan(t *testing.T) { s, mock := newMockProvider().init() apiID := fftypes.NewUUID() @@ -162,7 +204,7 @@ func TestContractAPIDBSelectFail(t *testing.T) { func TestContractAPIDBNoRows(t *testing.T) { s, mock := newMockProvider().init() apiID := fftypes.NewUUID() - mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id", "interface_id", "ledger", "location", "name", "namespace", "message_id"})) + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id", "interface_id", "ledger", "location", "name", "network_name", "namespace", "message_id", "published"})) _, err := s.GetContractAPIByID(context.Background(), "ns1", apiID) assert.NoError(t, err) assert.NoError(t, mock.ExpectationsWereMet()) @@ -171,8 +213,8 @@ func TestContractAPIDBNoRows(t *testing.T) { func TestGetContractAPIs(t *testing.T) { fb := database.ContractAPIQueryFactory.NewFilter(context.Background()) s, mock := newMockProvider().init() - rows := sqlmock.NewRows([]string{"id", "interface_id", "location", "name", "namespace", "message_id"}). - AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "8fcc4938-7d8b-4c00-a71b-1b46837c8ab1", nil, "banana", "ns1", "acfe07a2-117f-46b7-8d47-e3beb7cc382f") + rows := sqlmock.NewRows([]string{"id", "interface_id", "location", "name", "network_name", "namespace", "message_id", "published"}). 
+ AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "8fcc4938-7d8b-4c00-a71b-1b46837c8ab1", nil, "banana", "banana", "ns1", "acfe07a2-117f-46b7-8d47-e3beb7cc382f", true) mock.ExpectQuery("SELECT .*").WillReturnRows(rows) _, _, err := s.GetContractAPIs(context.Background(), "ns1", fb.And()) assert.NoError(t, err) @@ -198,9 +240,9 @@ func TestGetContractAPIsQueryFail(t *testing.T) { func TestGetContractAPIsQueryResultFail(t *testing.T) { fb := database.ContractAPIQueryFactory.NewFilter(context.Background()) s, mock := newMockProvider().init() - rows := sqlmock.NewRows([]string{"id", "interface_id", "location", "name", "namespace", "message_id"}). - AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "8fcc4938-7d8b-4c00-a71b-1b46837c8ab1", nil, "apple", "ns1", "acfe07a2-117f-46b7-8d47-e3beb7cc382f"). - AddRow("69851ca3-e9f9-489b-8731-dc6a7d990291", "4db4952e-4669-4243-a387-8f0f609e92bd", nil, "orange", nil, "acfe07a2-117f-46b7-8d47-e3beb7cc382f") + rows := sqlmock.NewRows([]string{"id", "interface_id", "location", "name", "network_name", "namespace", "message_id", "published"}). + AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "8fcc4938-7d8b-4c00-a71b-1b46837c8ab1", nil, "apple", "apple", "ns1", "acfe07a2-117f-46b7-8d47-e3beb7cc382f", false). + AddRow("69851ca3-e9f9-489b-8731-dc6a7d990291", "4db4952e-4669-4243-a387-8f0f609e92bd", nil, "orange", "orange", nil, "acfe07a2-117f-46b7-8d47-e3beb7cc382f", false) mock.ExpectQuery("SELECT .*").WillReturnRows(rows) _, _, err := s.GetContractAPIs(context.Background(), "ns1", fb.And()) assert.Regexp(t, "FF10121", err) @@ -209,11 +251,29 @@ func TestGetContractAPIsQueryResultFail(t *testing.T) { func TestGetContractAPIByName(t *testing.T) { s, mock := newMockProvider().init() - rows := sqlmock.NewRows([]string{"id", "interface_id", "location", "name", "namespace", "message_id"}). - AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "8fcc4938-7d8b-4c00-a71b-1b46837c8ab1", nil, "banana", "ns1", "acfe07a2-117f-46b7-8d47-e3beb7cc382f") + rows := sqlmock.NewRows([]string{"id", "interface_id", "location", "name", "network_name", "namespace", "message_id", "published"}). + AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "8fcc4938-7d8b-4c00-a71b-1b46837c8ab1", nil, "banana", "banana", "ns1", "acfe07a2-117f-46b7-8d47-e3beb7cc382f", true) mock.ExpectQuery("SELECT .*").WillReturnRows(rows) api, err := s.GetContractAPIByName(context.Background(), "ns1", "banana") assert.NotNil(t, api) assert.NoError(t, err) assert.NoError(t, mock.ExpectationsWereMet()) } + +func TestDeleteContractFailBegin(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + err := s.DeleteContractAPI(context.Background(), "ns1", fftypes.NewUUID()) + assert.Regexp(t, "FF00175", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestDeleteContractAPIFailDelete(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin() + mock.ExpectExec("DELETE .*").WillReturnError(fmt.Errorf("pop")) + mock.ExpectRollback() + err := s.DeleteContractAPI(context.Background(), "ns1", fftypes.NewUUID()) + assert.Regexp(t, "FF00179", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} diff --git a/internal/database/sqlcommon/contractlisteners_sql.go b/internal/database/sqlcommon/contractlisteners_sql.go index a4e081dac5..b1c1caaa0e 100644 --- a/internal/database/sqlcommon/contractlisteners_sql.go +++ b/internal/database/sqlcommon/contractlisteners_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -174,7 +174,7 @@ func (s *SQLCommon) GetContractListeners(ctx context.Context, namespace string, subs = append(subs, sub) } - return subs, s.QueryRes(ctx, contractlistenersTable, tx, fop, fi), err + return subs, s.QueryRes(ctx, contractlistenersTable, tx, fop, nil, fi), err } func (s *SQLCommon) UpdateContractListener(ctx context.Context, ns string, id *fftypes.UUID, update ffapi.Update) (err error) { diff --git a/internal/database/sqlcommon/data_sql.go b/internal/database/sqlcommon/data_sql.go index e828e3dca2..fe63b4788c 100644 --- a/internal/database/sqlcommon/data_sql.go +++ b/internal/database/sqlcommon/data_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -322,7 +322,7 @@ func (s *SQLCommon) GetData(ctx context.Context, namespace string, filter ffapi. data = append(data, d) } - return data, s.QueryRes(ctx, dataTable, tx, fop, fi), err + return data, s.QueryRes(ctx, dataTable, tx, fop, nil, fi), err } @@ -354,7 +354,7 @@ func (s *SQLCommon) GetDataRefs(ctx context.Context, namespace string, filter ff refs = append(refs, &ref) } - return refs, s.QueryRes(ctx, dataTable, tx, fop, fi), err + return refs, s.QueryRes(ctx, dataTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/datatype_sql.go b/internal/database/sqlcommon/datatype_sql.go index f337176266..31aa8ae314 100644 --- a/internal/database/sqlcommon/datatype_sql.go +++ b/internal/database/sqlcommon/datatype_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -196,6 +196,6 @@ func (s *SQLCommon) GetDatatypes(ctx context.Context, namespace string, filter f datatypes = append(datatypes, datatype) } - return datatypes, s.QueryRes(ctx, datatypesTable, tx, fop, fi), err + return datatypes, s.QueryRes(ctx, datatypesTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/event_sql.go b/internal/database/sqlcommon/event_sql.go index 574add68e6..1586bdeca0 100644 --- a/internal/database/sqlcommon/event_sql.go +++ b/internal/database/sqlcommon/event_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -222,6 +222,6 @@ func (s *SQLCommon) GetEvents(ctx context.Context, namespace string, filter ffap events = append(events, event) } - return events, s.QueryRes(ctx, eventsTable, tx, fop, fi), err + return events, s.QueryRes(ctx, eventsTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/ffi_errors_sql.go b/internal/database/sqlcommon/ffi_errors_sql.go index 364b903226..9f40815f11 100644 --- a/internal/database/sqlcommon/ffi_errors_sql.go +++ b/internal/database/sqlcommon/ffi_errors_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -137,6 +137,6 @@ func (s *SQLCommon) GetFFIErrors(ctx context.Context, namespace string, filter f errors = append(errors, ci) } - return errors, s.QueryRes(ctx, ffierrorsTable, tx, fop, fi), err + return errors, s.QueryRes(ctx, ffierrorsTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/ffi_events_sql.go b/internal/database/sqlcommon/ffi_events_sql.go index 75f9841ceb..9e741c62f3 100644 --- a/internal/database/sqlcommon/ffi_events_sql.go +++ b/internal/database/sqlcommon/ffi_events_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. 
+// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -165,7 +165,7 @@ func (s *SQLCommon) GetFFIEvents(ctx context.Context, namespace string, filter f events = append(events, ci) } - return events, s.QueryRes(ctx, ffieventsTable, tx, fop, fi), err + return events, s.QueryRes(ctx, ffieventsTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/ffi_methods_sql.go b/internal/database/sqlcommon/ffi_methods_sql.go index cb673dfc17..7ad284d0a9 100644 --- a/internal/database/sqlcommon/ffi_methods_sql.go +++ b/internal/database/sqlcommon/ffi_methods_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -169,7 +169,7 @@ func (s *SQLCommon) GetFFIMethods(ctx context.Context, namespace string, filter methods = append(methods, ci) } - return methods, s.QueryRes(ctx, ffimethodsTable, tx, fop, fi), err + return methods, s.QueryRes(ctx, ffimethodsTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/ffi_sql.go b/internal/database/sqlcommon/ffi_sql.go index 0d1f4ff2b4..34115a5f2e 100644 --- a/internal/database/sqlcommon/ffi_sql.go +++ b/internal/database/sqlcommon/ffi_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -21,6 +21,7 @@ import ( "database/sql" sq "github.com/Masterminds/squirrel" + "github.com/hyperledger/firefly-common/pkg/dbsql" "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" @@ -35,67 +36,142 @@ var ( "id", "namespace", "name", + "network_name", "version", "description", "message_id", + "published", } ffiFilterFieldMap = map[string]string{ - "message": "message_id", + "message": "message_id", + "networkname": "network_name", } ) const ffiTable = "ffi" -func (s *SQLCommon) UpsertFFI(ctx context.Context, ffi *fftypes.FFI) (err error) { - ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) - if err != nil { - return err +func (s *SQLCommon) attemptFFIUpdate(ctx context.Context, tx *dbsql.TXWrapper, ffi *fftypes.FFI) (int64, error) { + var networkName *string + if ffi.NetworkName != "" { + networkName = &ffi.NetworkName } - defer s.RollbackTx(ctx, tx, autoCommit) + return s.UpdateTx(ctx, ffiTable, tx, + sq.Update(ffiTable). + Set("name", ffi.Name). + Set("network_name", networkName). + Set("version", ffi.Version). + Set("description", ffi.Description). + Set("message_id", ffi.Message). + Set("published", ffi.Published). 
+ Where(sq.Eq{"id": ffi.ID}), + func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionFFIs, core.ChangeEventTypeUpdated, ffi.Namespace, ffi.ID) + }, + ) +} + +func (s *SQLCommon) setFFIInsertValues(query sq.InsertBuilder, ffi *fftypes.FFI) sq.InsertBuilder { + var networkName *string + if ffi.NetworkName != "" { + networkName = &ffi.NetworkName + } + return query.Values( + ffi.ID, + ffi.Namespace, + ffi.Name, + networkName, + ffi.Version, + ffi.Description, + ffi.Message, + ffi.Published, + ) +} +func (s *SQLCommon) attemptFFIInsert(ctx context.Context, tx *dbsql.TXWrapper, ffi *fftypes.FFI, requestConflictEmptyResult bool) error { + _, err := s.InsertTxExt(ctx, ffiTable, tx, + s.setFFIInsertValues(sq.Insert(ffiTable).Columns(ffiColumns...), ffi), + func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionFFIs, core.ChangeEventTypeCreated, ffi.Namespace, ffi.ID) + }, requestConflictEmptyResult) + return err +} + +func (s *SQLCommon) ffiExists(ctx context.Context, tx *dbsql.TXWrapper, ffi *fftypes.FFI) (bool, error) { rows, _, err := s.QueryTx(ctx, ffiTable, tx, - sq.Select("id"). - From(ffiTable). - Where(sq.Eq{ + sq.Select("id").From(ffiTable).Where(sq.And{ + sq.Eq{ "namespace": ffi.Namespace, - "id": ffi.ID, - }), + "version": ffi.Version, + }, + sq.Or{ + sq.Eq{"name": ffi.Name}, + sq.Eq{"network_name": ffi.NetworkName}, + }, + }), ) + if err != nil { + return false, err + } + defer rows.Close() + return rows.Next(), nil +} + +func (s *SQLCommon) InsertOrGetFFI(ctx context.Context, ffi *fftypes.FFI) (existing *fftypes.FFI, err error) { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) + if err != nil { + return nil, err + } + defer s.RollbackTx(ctx, tx, autoCommit) + + insertErr := s.attemptFFIInsert(ctx, tx, ffi, true /* we want a failure here we can progress past */) + if insertErr == nil { + return nil, s.CommitTx(ctx, tx, autoCommit) + } + + // Do a select within the transaction to determine if the FFI already exists + existing, queryErr := s.getFFIPred(ctx, ffi.Namespace+":"+ffi.Name, sq.And{ + sq.Eq{"namespace": ffi.Namespace}, + sq.Or{ + sq.Eq{"id": ffi.ID}, + sq.Eq{"name": ffi.Name, "version": ffi.Version}, + sq.Eq{"network_name": ffi.NetworkName, "version": ffi.Version}, + }, + }) + if queryErr != nil || existing != nil { + return existing, queryErr + } + + // Error was apparently not an index conflict - must have been something else + return nil, insertErr +} + +func (s *SQLCommon) UpsertFFI(ctx context.Context, ffi *fftypes.FFI, optimization database.UpsertOptimization) error { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) if err != nil { return err } - existing := rows.Next() - rows.Close() - - if existing { - if _, err = s.UpdateTx(ctx, ffiTable, tx, - sq.Update(ffiTable). - Set("name", ffi.Name). - Set("version", ffi.Version). - Set("description", ffi.Description). 
- Set("message_id", ffi.Message), - func() { - s.callbacks.UUIDCollectionNSEvent(database.CollectionFFIs, core.ChangeEventTypeUpdated, ffi.Namespace, ffi.ID) - }, - ); err != nil { + defer s.RollbackTx(ctx, tx, autoCommit) + + optimized := false + if optimization == database.UpsertOptimizationNew { + opErr := s.attemptFFIInsert(ctx, tx, ffi, true /* we want a failure here we can progress past */) + optimized = opErr == nil + } else if optimization == database.UpsertOptimizationExisting { + rowsAffected, opErr := s.attemptFFIUpdate(ctx, tx, ffi) + optimized = opErr == nil && rowsAffected == 1 + } + + if !optimized { + // Do a select within the transaction to determine if the FFI already exists + exists, err := s.ffiExists(ctx, tx, ffi) + if err != nil { return err + } else if exists { + if _, err := s.attemptFFIUpdate(ctx, tx, ffi); err != nil { + return err + } } - } else { - if _, err = s.InsertTx(ctx, ffiTable, tx, - sq.Insert(ffiTable). - Columns(ffiColumns...). - Values( - ffi.ID, - ffi.Namespace, - ffi.Name, - ffi.Version, - ffi.Description, - ffi.Message, - ), - func() { - s.callbacks.UUIDCollectionNSEvent(database.CollectionFFIs, core.ChangeEventTypeCreated, ffi.Namespace, ffi.ID) - }, - ); err != nil { + if err := s.attemptFFIInsert(ctx, tx, ffi, false); err != nil { return err } } @@ -105,14 +181,20 @@ func (s *SQLCommon) UpsertFFI(ctx context.Context, ffi *fftypes.FFI) (err error) func (s *SQLCommon) ffiResult(ctx context.Context, row *sql.Rows) (*fftypes.FFI, error) { ffi := fftypes.FFI{} + var networkName *string err := row.Scan( &ffi.ID, &ffi.Namespace, &ffi.Name, + &networkName, &ffi.Version, &ffi.Description, &ffi.Message, + &ffi.Published, ) + if networkName != nil { + ffi.NetworkName = *networkName + } if err != nil { return nil, i18n.WrapError(ctx, err, coremsgs.MsgDBReadErr, ffiTable) } @@ -166,7 +248,7 @@ func (s *SQLCommon) GetFFIs(ctx context.Context, namespace string, filter ffapi. 
ffis = append(ffis, cd) } - return ffis, s.QueryRes(ctx, ffiTable, tx, fop, fi), err + return ffis, s.QueryRes(ctx, ffiTable, tx, fop, nil, fi), err } @@ -175,5 +257,36 @@ func (s *SQLCommon) GetFFIByID(ctx context.Context, namespace string, id *fftype } func (s *SQLCommon) GetFFI(ctx context.Context, namespace, name, version string) (*fftypes.FFI, error) { - return s.getFFIPred(ctx, namespace+":"+name+":"+version, sq.Eq{"namespace": namespace, "name": name, "version": version}) + return s.getFFIPred(ctx, namespace+":"+name+":"+version, sq.Eq{ + "namespace": namespace, + "name": name, + "version": version, + }) +} + +func (s *SQLCommon) GetFFIByNetworkName(ctx context.Context, namespace, networkName, version string) (*fftypes.FFI, error) { + return s.getFFIPred(ctx, namespace+":"+networkName+":"+version, sq.Eq{ + "namespace": namespace, + "network_name": networkName, + "version": version, + }) +} + +func (s *SQLCommon) DeleteFFI(ctx context.Context, namespace string, id *fftypes.UUID) error { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) + if err != nil { + return err + } + defer s.RollbackTx(ctx, tx, autoCommit) + + err = s.DeleteTx(ctx, ffiTable, tx, sq.Delete(ffiTable).Where(sq.Eq{ + "id": id, "namespace": namespace, + }), func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionFFIs, core.ChangeEventTypeDeleted, namespace, id) + }) + if err != nil { + return err + } + + return s.CommitTx(ctx, tx, autoCommit) } diff --git a/internal/database/sqlcommon/ffi_sql_test.go b/internal/database/sqlcommon/ffi_sql_test.go index e623d8435e..a620f4749a 100644 --- a/internal/database/sqlcommon/ffi_sql_test.go +++ b/internal/database/sqlcommon/ffi_sql_test.go @@ -43,6 +43,7 @@ func TestFFIE2EWithDB(t *testing.T) { ID: id, Namespace: "ns1", Name: "math", + NetworkName: "math", Version: "v1.0.0", Description: "Does things and stuff", Message: fftypes.NewUUID(), @@ -71,8 +72,9 @@ func TestFFIE2EWithDB(t *testing.T) { s.callbacks.On("UUIDCollectionNSEvent", database.CollectionFFIs, core.ChangeEventTypeCreated, "ns1", ffi.ID).Return() s.callbacks.On("UUIDCollectionNSEvent", database.CollectionFFIs, core.ChangeEventTypeUpdated, "ns1", ffi.ID).Return() + s.callbacks.On("UUIDCollectionNSEvent", database.CollectionFFIs, core.ChangeEventTypeDeleted, "ns1", ffi.ID).Return() - err := s.UpsertFFI(ctx, ffi) + _, err := s.InsertOrGetFFI(ctx, ffi) assert.NoError(t, err) // Check we get the correct fields back @@ -86,8 +88,7 @@ func TestFFIE2EWithDB(t *testing.T) { assert.Equal(t, ffi.Message, dataRead.Message) ffi.Version = "v1.1.0" - - err = s.UpsertFFI(ctx, ffi) + err = s.UpsertFFI(ctx, ffi, database.UpsertOptimizationExisting) assert.NoError(t, err) // Check we get the correct fields back @@ -99,12 +100,43 @@ func TestFFIE2EWithDB(t *testing.T) { assert.Equal(t, ffi.Name, dataRead.Name) assert.Equal(t, ffi.Version, dataRead.Version) assert.Equal(t, ffi.Message, dataRead.Message) + + dataRead, err = s.GetFFIByNetworkName(ctx, "ns1", "math", "v1.1.0") + assert.NoError(t, err) + assert.NotNil(t, dataRead) + assert.Equal(t, ffi.ID, dataRead.ID) + assert.Equal(t, ffi.Namespace, dataRead.Namespace) + assert.Equal(t, ffi.Name, dataRead.Name) + assert.Equal(t, ffi.Version, dataRead.Version) + assert.Equal(t, ffi.Message, dataRead.Message) + + // Cannot insert again with same name or network name + existing, err := s.InsertOrGetFFI(ctx, &fftypes.FFI{ + ID: fftypes.NewUUID(), + Name: "math", + Version: "v1.1.0", + Namespace: "ns1", + }) + assert.NoError(t, err) + assert.Equal(t, ffi.ID, existing.ID) + existing, 
err = s.InsertOrGetFFI(ctx, &fftypes.FFI{ + ID: fftypes.NewUUID(), + NetworkName: "math", + Version: "v1.1.0", + Namespace: "ns1", + }) + assert.NoError(t, err) + assert.Equal(t, ffi.ID, existing.ID) + + // Delete the FFI + err = s.DeleteFFI(ctx, "ns1", ffi.ID) + assert.NoError(t, err) } func TestFFIDBFailBeginTransaction(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) - err := s.UpsertFFI(context.Background(), &fftypes.FFI{}) + err := s.UpsertFFI(context.Background(), &fftypes.FFI{}, database.UpsertOptimizationNew) assert.Regexp(t, "FF00175", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -113,20 +145,39 @@ func TestFFIDBFailSelect(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) - err := s.UpsertFFI(context.Background(), &fftypes.FFI{}) + err := s.UpsertFFI(context.Background(), &fftypes.FFI{}, database.UpsertOptimizationNew) assert.Regexp(t, "pop", err) assert.NoError(t, mock.ExpectationsWereMet()) } +func TestFFIDBFailUpsert(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin() + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{})) + mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) + err := s.UpsertFFI(context.Background(), &fftypes.FFI{}, database.UpsertOptimizationNew) + assert.Regexp(t, "pop", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestFFIDBInsertFailBegin(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + _, err := s.InsertOrGetFFI(context.Background(), &fftypes.FFI{}) + assert.Regexp(t, "FF00175", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + func TestFFIDBFailInsert(t *testing.T) { - rows := sqlmock.NewRows([]string{"id", "namespace", "name", "version"}) s, mock := newMockProvider().init() mock.ExpectBegin() - mock.ExpectQuery("SELECT .*").WillReturnRows(rows) + mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{})) + mock.ExpectRollback() ffi := &fftypes.FFI{ ID: fftypes.NewUUID(), } - err := s.UpsertFFI(context.Background(), ffi) + _, err := s.InsertOrGetFFI(context.Background(), ffi) assert.Regexp(t, "FF00177", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -141,7 +192,7 @@ func TestFFIDBFailUpdate(t *testing.T) { ffi := &fftypes.FFI{ ID: fftypes.NewUUID(), } - err := s.UpsertFFI(context.Background(), ffi) + err := s.UpsertFFI(context.Background(), ffi, database.UpsertOptimizationNew) assert.Regexp(t, "pop", err) } @@ -176,7 +227,7 @@ func TestGetFFIs(t *testing.T) { fb := database.FFIQueryFactory.NewFilter(context.Background()) s, mock := newMockProvider().init() rows := sqlmock.NewRows(ffiColumns). - AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "ns1", "math", "v1.0.0", "super mathy things", "acfe07a2-117f-46b7-8d47-e3beb7cc382f") + AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "ns1", "math", "math", "v1.0.0", "super mathy things", "acfe07a2-117f-46b7-8d47-e3beb7cc382f", false) mock.ExpectQuery("SELECT .*").WillReturnRows(rows) _, _, err := s.GetFFIs(context.Background(), "ns1", fb.And()) assert.NoError(t, err) @@ -214,7 +265,7 @@ func TestGetFFIsQueryResultFail(t *testing.T) { func TestGetFFI(t *testing.T) { s, mock := newMockProvider().init() rows := sqlmock.NewRows(ffiColumns). 
- AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "ns1", "math", "v1.0.0", "super mathy things", "acfe07a2-117f-46b7-8d47-e3beb7cc382f") + AddRow("7e2c001c-e270-4fd7-9e82-9dacee843dc2", "ns1", "math", "math", "v1.0.0", "super mathy things", "acfe07a2-117f-46b7-8d47-e3beb7cc382f", false) mock.ExpectQuery("SELECT .*").WillReturnRows(rows) ffi, err := s.GetFFI(context.Background(), "ns1", "math", "v1.0.0") assert.NoError(t, err) @@ -223,3 +274,21 @@ func TestGetFFI(t *testing.T) { assert.Equal(t, "v1.0.0", ffi.Version) assert.NoError(t, mock.ExpectationsWereMet()) } + +func TestDeleteFFIFailBegin(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + err := s.DeleteFFI(context.Background(), "ns1", fftypes.NewUUID()) + assert.Regexp(t, "FF00175", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestDeleteFFIFailDelete(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin() + mock.ExpectExec("DELETE .*").WillReturnError(fmt.Errorf("pop")) + mock.ExpectRollback() + err := s.DeleteFFI(context.Background(), "ns1", fftypes.NewUUID()) + assert.Regexp(t, "FF00179", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} diff --git a/internal/database/sqlcommon/group_sql.go b/internal/database/sqlcommon/group_sql.go index 8309129a12..fec8173e41 100644 --- a/internal/database/sqlcommon/group_sql.go +++ b/internal/database/sqlcommon/group_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -306,5 +306,5 @@ func (s *SQLCommon) GetGroups(ctx context.Context, namespace string, filter ffap } } - return groups, s.QueryRes(ctx, groupsTable, tx, fop, fi), err + return groups, s.QueryRes(ctx, groupsTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/identity_sql.go b/internal/database/sqlcommon/identity_sql.go index 0ddf43838e..3a043483ae 100644 --- a/internal/database/sqlcommon/identity_sql.go +++ b/internal/database/sqlcommon/identity_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -229,6 +229,6 @@ func (s *SQLCommon) GetIdentities(ctx context.Context, namespace string, filter identities = append(identities, d) } - return identities, s.QueryRes(ctx, identitiesTable, tx, fop, fi), err + return identities, s.QueryRes(ctx, identitiesTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/message_sql.go b/internal/database/sqlcommon/message_sql.go index c1339a4c49..f8df556450 100644 --- a/internal/database/sqlcommon/message_sql.go +++ b/internal/database/sqlcommon/message_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -50,6 +50,7 @@ var ( "pins", "state", "confirmed", + "reject_reason", "tx_type", "tx_id", "tx_parent_type", @@ -66,6 +67,7 @@ var ( "batch": "batch_id", "group": "group_hash", "idempotencykey": "idempotency_key", + "rejectreason": "reject_reason", } ) @@ -95,6 +97,7 @@ func (s *SQLCommon) attemptMessageUpdate(ctx context.Context, tx *dbsql.TXWrappe Set("pins", message.Pins). Set("state", message.State). Set("confirmed", message.Confirmed). + Set("reject_reason", message.RejectReason). Set("tx_type", message.Header.TxType). Set("tx_id", message.TransactionID). Set("tx_parent_type", txParentType). 
@@ -137,6 +140,7 @@ func (s *SQLCommon) setMessageInsertValues(query sq.InsertBuilder, message *core message.Pins, message.State, message.Confirmed, + message.RejectReason, message.Header.TxType, message.TransactionID, txParentType, @@ -450,6 +454,7 @@ func (s *SQLCommon) msgResult(ctx context.Context, row *sql.Rows) (*core.Message &msg.Pins, &msg.State, &msg.Confirmed, + &msg.RejectReason, &msg.Header.TxType, &msg.TransactionID, &txParent.Type, @@ -526,7 +531,7 @@ func (s *SQLCommon) getMessagesQuery(ctx context.Context, namespace string, quer return nil, nil, err } } - return msgs, s.QueryRes(ctx, messagesTable, tx, fop, fi), err + return msgs, s.QueryRes(ctx, messagesTable, tx, fop, nil, fi), err } func (s *SQLCommon) GetMessageIDs(ctx context.Context, namespace string, filter ffapi.Filter) (ids []*core.IDAndSequence, err error) { diff --git a/internal/database/sqlcommon/message_sql_test.go b/internal/database/sqlcommon/message_sql_test.go index a0a2604e98..84df7cfed3 100644 --- a/internal/database/sqlcommon/message_sql_test.go +++ b/internal/database/sqlcommon/message_sql_test.go @@ -109,7 +109,7 @@ func TestUpsertE2EWithDB(t *testing.T) { Created: fftypes.Now(), Namespace: "ns12345", Topics: []string{"topic1", "topic2"}, - Tag: "tag1", + Tag: "tag_1", Group: gid, DataHash: fftypes.NewRandB32(), TxType: core.TransactionTypeBatchPin, @@ -237,6 +237,17 @@ func TestUpsertE2EWithDB(t *testing.T) { msgReadJson, _ = json.Marshal(msgRead) assert.Equal(t, string(msgJson), string(msgReadJson)) + // Query with a complex "like" filter + filter = fb.And( + fb.IStartsWith("tag", "TAG_"), + fb.EndsWith("tag", "_1"), + fb.NotContains("tag", "%tag%"), + ) + msgs, _, err = s.GetMessages(ctx, "ns12345", filter) + assert.NoError(t, err) + assert.Equal(t, 1, len(msgs)) + assert.Equal(t, *msgID, *msgs[0].Header.ID) + s.callbacks.AssertExpectations(t) } @@ -565,7 +576,7 @@ func TestGetMessageByIDLoadRefsFail(t *testing.T) { cols := append([]string{}, msgColumns...) cols = append(cols, "id()") mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows(cols). - AddRow(msgID.String(), nil, core.MessageTypeBroadcast, "author1", "0x12345", 0, "ns1", "ns1", "t1", "c1", nil, b32.String(), b32.String(), b32.String(), "confirmed", 0, "pin", nil, "", nil, nil, "bob", 0)) + AddRow(msgID.String(), nil, core.MessageTypeBroadcast, "author1", "0x12345", 0, "ns1", "ns1", "t1", "c1", nil, b32.String(), b32.String(), b32.String(), "confirmed", 0, "", "pin", nil, "", nil, nil, "bob", 0)) mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) _, err := s.GetMessageByID(context.Background(), "ns1", msgID) assert.Regexp(t, "FF00176", err) @@ -612,7 +623,7 @@ func TestGetMessagesLoadRefsFail(t *testing.T) { cols := append([]string{}, msgColumns...) cols = append(cols, "id()") mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows(cols). 
- AddRow(msgID.String(), nil, core.MessageTypeBroadcast, "author1", "0x12345", 0, "ns1", "ns1", "t1", "c1", nil, b32.String(), b32.String(), b32.String(), "confirmed", 0, "pin", nil, "", nil, nil, "bob", 0)) + AddRow(msgID.String(), nil, core.MessageTypeBroadcast, "author1", "0x12345", 0, "ns1", "ns1", "t1", "c1", nil, b32.String(), b32.String(), b32.String(), "confirmed", 0, "", "pin", nil, "", nil, nil, "bob", 0)) mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) f := database.MessageQueryFactory.NewFilter(context.Background()).Gt("confirmed", "0") _, _, err := s.GetMessages(context.Background(), "ns1", f) diff --git a/internal/database/sqlcommon/nextpin_sql.go b/internal/database/sqlcommon/nextpin_sql.go index 48afb358b4..0c484b724d 100644 --- a/internal/database/sqlcommon/nextpin_sql.go +++ b/internal/database/sqlcommon/nextpin_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -138,7 +138,7 @@ func (s *SQLCommon) GetNextPins(ctx context.Context, namespace string, filter ff nextpins = append(nextpins, d) } - return nextpins, s.QueryRes(ctx, pinsTable, tx, fop, fi), err + return nextpins, s.QueryRes(ctx, pinsTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/nonce_sql.go b/internal/database/sqlcommon/nonce_sql.go index e42efca3fb..f101034abb 100644 --- a/internal/database/sqlcommon/nonce_sql.go +++ b/internal/database/sqlcommon/nonce_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -141,7 +141,7 @@ func (s *SQLCommon) GetNonces(ctx context.Context, filter ffapi.Filter) (message nonce = append(nonce, d) } - return nonce, s.QueryRes(ctx, noncesTable, tx, fop, fi), err + return nonce, s.QueryRes(ctx, noncesTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/offset_sql.go b/internal/database/sqlcommon/offset_sql.go index d84a3cb312..bbd1c329f7 100644 --- a/internal/database/sqlcommon/offset_sql.go +++ b/internal/database/sqlcommon/offset_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -171,7 +171,7 @@ func (s *SQLCommon) GetOffsets(ctx context.Context, filter ffapi.Filter) (messag offset = append(offset, d) } - return offset, s.QueryRes(ctx, offsetsTable, tx, fop, fi), err + return offset, s.QueryRes(ctx, offsetsTable, tx, fop, nil, fi), err } diff --git a/internal/database/sqlcommon/operation_sql.go b/internal/database/sqlcommon/operation_sql.go index 8ef132d1ab..78c45f7faa 100644 --- a/internal/database/sqlcommon/operation_sql.go +++ b/internal/database/sqlcommon/operation_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -21,6 +21,7 @@ import ( "database/sql" sq "github.com/Masterminds/squirrel" + "github.com/hyperledger/firefly-common/pkg/dbsql" "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" @@ -55,6 +56,33 @@ var ( const operationsTable = "operations" +func (s *SQLCommon) setOperationInsertValues(query sq.InsertBuilder, operation *core.Operation) sq.InsertBuilder { + return query.Values( + operation.ID, + operation.Namespace, + operation.Transaction, + string(operation.Type), + string(operation.Status), + operation.Plugin, + operation.Created, + operation.Updated, + operation.Error, + operation.Input, + operation.Output, + operation.Retry, + ) +} + +func (s *SQLCommon) attemptOperationInsert(ctx context.Context, tx *dbsql.TXWrapper, operation *core.Operation) error { + _, err := s.InsertTx(ctx, operationsTable, tx, + s.setOperationInsertValues(sq.Insert(operationsTable).Columns(opColumns...), operation), + func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionOperations, core.ChangeEventTypeCreated, operation.Namespace, operation.ID) + }, + ) + return err +} + func (s *SQLCommon) InsertOperation(ctx context.Context, operation *core.Operation, hooks ...database.PostCompletionHook) (err error) { ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) if err != nil { @@ -62,34 +90,55 @@ func (s *SQLCommon) InsertOperation(ctx context.Context, operation *core.Operati } defer s.RollbackTx(ctx, tx, autoCommit) - if _, err = s.InsertTx(ctx, operationsTable, tx, - sq.Insert(operationsTable). - Columns(opColumns...). - Values( - operation.ID, - operation.Namespace, - operation.Transaction, - string(operation.Type), - string(operation.Status), - operation.Plugin, - operation.Created, - operation.Updated, - operation.Error, - operation.Input, - operation.Output, - operation.Retry, - ), - func() { - s.callbacks.UUIDCollectionNSEvent(database.CollectionOperations, core.ChangeEventTypeCreated, operation.Namespace, operation.ID) - for _, hook := range hooks { - hook() - } - }, - ); err != nil { + if err := s.attemptOperationInsert(ctx, tx, operation); err != nil { + return err + } + + for _, hook := range hooks { + tx.AddPostCommitHook(hook) + } + + return s.CommitTx(ctx, tx, autoCommit) +} + +func (s *SQLCommon) InsertOperations(ctx context.Context, operations []*core.Operation, hooks ...database.PostCompletionHook) (err error) { + + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) + if err != nil { return err } + defer s.RollbackTx(ctx, tx, autoCommit) + if s.Features().MultiRowInsert { + query := sq.Insert(operationsTable).Columns(opColumns...) 
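+ // Build a single multi-row INSERT by appending one set of insert values per operation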
+    for _, operation := range operations {
+        query = s.setOperationInsertValues(query, operation)
+    }
+    sequences := make([]int64, len(operations))
+
+    // Use a single multi-row insert for the operations
+    err := s.InsertTxRows(ctx, operationsTable, tx, query, func() {
+        for _, operation := range operations {
+            s.callbacks.UUIDCollectionNSEvent(database.CollectionOperations, core.ChangeEventTypeCreated, operation.Namespace, operation.ID)
+        }
+    }, sequences, false /* no circumstances where we expect partial success inserting operations */)
+    if err != nil {
+        return err
+    }
+  } else {
+    // Fall back to individual inserts grouped in a TX
+    for _, operation := range operations {
+        if err := s.attemptOperationInsert(ctx, tx, operation); err != nil {
+            return err
+        }
+    }
+  }
+
+  for _, hook := range hooks {
+    tx.AddPostCommitHook(hook)
+  }
   return s.CommitTx(ctx, tx, autoCommit)
+}
 
 func (s *SQLCommon) opResult(ctx context.Context, row *sql.Rows) (*core.Operation, error) {
@@ -161,7 +210,7 @@ func (s *SQLCommon) GetOperations(ctx context.Context, namespace string, filter
     ops = append(ops, op)
   }
 
-  return ops, s.QueryRes(ctx, operationsTable, tx, fop, fi), err
+  return ops, s.QueryRes(ctx, operationsTable, tx, fop, nil, fi), err
 }
 
 func (s *SQLCommon) UpdateOperation(ctx context.Context, ns string, id *fftypes.UUID, filter ffapi.Filter, update ffapi.Update) (updated bool, err error) {
diff --git a/internal/database/sqlcommon/operation_sql_test.go b/internal/database/sqlcommon/operation_sql_test.go
index 79a4a109d9..0d9fc1c5e8 100644
--- a/internal/database/sqlcommon/operation_sql_test.go
+++ b/internal/database/sqlcommon/operation_sql_test.go
@@ -241,3 +241,68 @@ func TestOperationUpdateFilterFail(t *testing.T) {
   _, err := s.UpdateOperation(context.Background(), "ns1", fftypes.NewUUID(), f, u)
   assert.Regexp(t, "FF00143", err)
 }
+
+func TestInsertOperationsBeginFail(t *testing.T) {
+    s := newMockProvider()
+    s.multiRowInsert = true
+    s.fakePSQLInsert = true
+    s, mock := s.init()
+    op1 := &core.Operation{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    mock.ExpectBegin().WillReturnError(fmt.Errorf("pop"))
+    err := s.InsertOperations(context.Background(), []*core.Operation{op1})
+    assert.Regexp(t, "FF00175", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+    s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertOperationsMultiRowOK(t *testing.T) {
+    s := newMockProvider()
+    s.multiRowInsert = true
+    s.fakePSQLInsert = true
+    s, mock := s.init()
+
+    op1 := &core.Operation{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    op2 := &core.Operation{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    s.callbacks.On("UUIDCollectionNSEvent", database.CollectionOperations, core.ChangeEventTypeCreated, "ns1", op1.ID)
+    s.callbacks.On("UUIDCollectionNSEvent", database.CollectionOperations, core.ChangeEventTypeCreated, "ns1", op2.ID)
+
+    mock.ExpectBegin()
+    mock.ExpectQuery("INSERT.*operations").WillReturnRows(sqlmock.NewRows([]string{s.SequenceColumn()}).
+        AddRow(int64(1001)).
+        AddRow(int64(1002)),
+    )
+    mock.ExpectCommit()
+    hookCalled := make(chan struct{}, 1)
+    err := s.InsertOperations(context.Background(), []*core.Operation{op1, op2}, func() {
+        close(hookCalled)
+    })
+    <-hookCalled
+    assert.NoError(t, err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+    s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertOperationsMultiRowFail(t *testing.T) {
+    s := newMockProvider()
+    s.multiRowInsert = true
+    s.fakePSQLInsert = true
+    s, mock := s.init()
+    op1 := &core.Operation{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    mock.ExpectBegin()
+    mock.ExpectQuery("INSERT.*").WillReturnError(fmt.Errorf("pop"))
+    err := s.InsertOperations(context.Background(), []*core.Operation{op1})
+    assert.Regexp(t, "FF00177", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+    s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertOperationsSingleRowFail(t *testing.T) {
+    s, mock := newMockProvider().init()
+    op1 := &core.Operation{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    mock.ExpectBegin()
+    mock.ExpectExec("INSERT.*").WillReturnError(fmt.Errorf("pop"))
+    err := s.InsertOperations(context.Background(), []*core.Operation{op1})
+    assert.Regexp(t, "FF00177", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+    s.callbacks.AssertExpectations(t)
+}
diff --git a/internal/database/sqlcommon/pin_sql.go b/internal/database/sqlcommon/pin_sql.go
index a2961aa406..bd61079b61 100644
--- a/internal/database/sqlcommon/pin_sql.go
+++ b/internal/database/sqlcommon/pin_sql.go
@@ -1,4 +1,4 @@
-// Copyright © 2023 Kaleido, Inc.
+// Copyright © 2024 Kaleido, Inc.
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -197,7 +197,7 @@ func (s *SQLCommon) GetPins(ctx context.Context, namespace string, filter ffapi.
     pin = append(pin, d)
   }
 
-  return pin, s.QueryRes(ctx, pinsTable, tx, fop, fi), err
+  return pin, s.QueryRes(ctx, pinsTable, tx, fop, nil, fi), err
 }
 
diff --git a/internal/database/sqlcommon/sqlcommon.go b/internal/database/sqlcommon/sqlcommon.go
index 93c1f8c54f..93d7e32460 100644
--- a/internal/database/sqlcommon/sqlcommon.go
+++ b/internal/database/sqlcommon/sqlcommon.go
@@ -88,7 +88,11 @@ func (s *SQLCommon) SetHandler(namespace string, handler database.Callbacks) {
   if s.callbacks.handlers == nil {
     s.callbacks.handlers = make(map[string]database.Callbacks)
   }
-  s.callbacks.handlers[namespace] = handler
+  if handler == nil {
+    delete(s.callbacks.handlers, namespace)
+  } else {
+    s.callbacks.handlers[namespace] = handler
+  }
 }
 
 func (s *SQLCommon) Capabilities() *database.Capabilities { return s.capabilities }
diff --git a/internal/database/sqlcommon/sqlcommon_test.go b/internal/database/sqlcommon/sqlcommon_test.go
index 9d909043fe..02ab122f2f 100644
--- a/internal/database/sqlcommon/sqlcommon_test.go
+++ b/internal/database/sqlcommon/sqlcommon_test.go
@@ -92,11 +92,12 @@ func TestTXConcurrency(t *testing.T) {
 
 func TestNamespaceCallbacks(t *testing.T) {
   tcb := &databasemocks.Callbacks{}
-  cb := callbacks{
-    handlers: map[string]database.Callbacks{
-      "ns1": tcb,
+  s := &SQLCommon{
+    callbacks: callbacks{
+      handlers: map[string]database.Callbacks{},
     },
   }
+  s.SetHandler("ns1", tcb)
   id := fftypes.NewUUID()
   hash := fftypes.NewRandB32()
 
@@ -105,8 +106,11 @@ func TestNamespaceCallbacks(t *testing.T) {
   tcb.On("UUIDCollectionNSEvent", database.CollectionOperations, core.ChangeEventTypeCreated, "ns1", id).Return()
   tcb.On("HashCollectionNSEvent", database.CollectionGroups, core.ChangeEventTypeUpdated, "ns1", hash).Return()
 
-  cb.OrderedUUIDCollectionNSEvent(database.CollectionMessages, core.ChangeEventTypeCreated, "ns1", id, 1)
-  cb.OrderedCollectionNSEvent(database.CollectionPins, core.ChangeEventTypeCreated, "ns1", 1)
-  cb.UUIDCollectionNSEvent(database.CollectionOperations, core.ChangeEventTypeCreated, "ns1", id)
-  cb.HashCollectionNSEvent(database.CollectionGroups, core.ChangeEventTypeUpdated, "ns1", hash)
+  s.callbacks.OrderedUUIDCollectionNSEvent(database.CollectionMessages, core.ChangeEventTypeCreated, "ns1", id, 1)
+  s.callbacks.OrderedCollectionNSEvent(database.CollectionPins, core.ChangeEventTypeCreated, "ns1", 1)
+  s.callbacks.UUIDCollectionNSEvent(database.CollectionOperations, core.ChangeEventTypeCreated, "ns1", id)
+  s.callbacks.HashCollectionNSEvent(database.CollectionGroups, core.ChangeEventTypeUpdated, "ns1", hash)
+
+  s.SetHandler("ns1", nil)
+  assert.Empty(t, s.callbacks.handlers)
 }
diff --git a/internal/database/sqlcommon/subscription_sql.go b/internal/database/sqlcommon/subscription_sql.go
index 2e887f222d..46b07ff84d 100644
--- a/internal/database/sqlcommon/subscription_sql.go
+++ b/internal/database/sqlcommon/subscription_sql.go
@@ -1,4 +1,4 @@
-// Copyright © 2022 Kaleido, Inc.
+// Copyright © 2024 Kaleido, Inc.
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -210,7 +210,7 @@ func (s *SQLCommon) GetSubscriptions(ctx context.Context, namespace string, filt
     subscription = append(subscription, d)
   }
 
-  return subscription, s.QueryRes(ctx, subscriptionsTable, tx, fop, fi), err
+  return subscription, s.QueryRes(ctx, subscriptionsTable, tx, fop, nil, fi), err
 }
 
diff --git a/internal/database/sqlcommon/subscription_sql_test.go b/internal/database/sqlcommon/subscription_sql_test.go
index b96db645f1..30a0802754 100644
--- a/internal/database/sqlcommon/subscription_sql_test.go
+++ b/internal/database/sqlcommon/subscription_sql_test.go
@@ -68,6 +68,9 @@ func TestSubscriptionsE2EWithDB(t *testing.T) {
       FirstEvent: &newest,
       ReadAhead:  &fifty,
     },
+    WebhookSubOptions: core.WebhookSubOptions{
+      TLSConfigName: "myconfig",
+    },
   }
   subOpts.TransportOptions()["my-transport-option"] = true
   subscriptionUpdated := &core.Subscription{
@@ -101,12 +104,13 @@ func TestSubscriptionsE2EWithDB(t *testing.T) {
   assert.NoError(t, err)
 
   // Check we get the exact same data back - note the removal of one of the subscription elements
-  subscriptionRead, err = s.GetSubscriptionByID(ctx, "ns1", subscription.ID)
+  subscriptionRead, err = s.GetSubscriptionByID(ctx, "ns1", subscriptionUpdated.ID)
   assert.NoError(t, err)
   subscriptionJson, _ = json.Marshal(&subscriptionUpdated)
   subscriptionReadJson, _ = json.Marshal(&subscriptionRead)
   assert.Equal(t, string(subscriptionJson), string(subscriptionReadJson))
   assert.Equal(t, true, subscriptionRead.Options.TransportOptions()["my-transport-option"])
+  assert.Equal(t, "myconfig", subscriptionRead.Options.TLSConfigName)
 
   // Query back the subscription
   fb := database.SubscriptionQueryFactory.NewFilter(ctx)
diff --git a/internal/database/sqlcommon/tokenapproval_sql.go b/internal/database/sqlcommon/tokenapproval_sql.go
index 6893ea18b4..a529c1e97a 100644
--- a/internal/database/sqlcommon/tokenapproval_sql.go
+++ b/internal/database/sqlcommon/tokenapproval_sql.go
@@ -1,4 +1,4 @@
-// Copyright © 2022 Kaleido, Inc.
+// Copyright © 2024 Kaleido, Inc.
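
The SetHandler change to sqlcommon.go above treats a nil handler as a deregistration, which is what lets the reworked TestNamespaceCallbacks assert an empty handler map after SetHandler("ns1", nil). A minimal standalone sketch of that register/deregister pattern, using an invented Handler interface and Registry type rather than the real database.Callbacks plumbing:

package main

import "fmt"

// Handler stands in for a per-namespace callback interface such as database.Callbacks.
type Handler interface {
	OnEvent(name string)
}

// Registry keeps one handler per namespace; passing a nil handler removes the entry.
type Registry struct {
	handlers map[string]Handler
}

func (r *Registry) SetHandler(namespace string, handler Handler) {
	if r.handlers == nil {
		r.handlers = make(map[string]Handler)
	}
	if handler == nil {
		delete(r.handlers, namespace) // deregister rather than storing a nil interface value
		return
	}
	r.handlers[namespace] = handler
}

type logHandler struct{}

func (l *logHandler) OnEvent(name string) { fmt.Println("event:", name) }

func main() {
	r := &Registry{}
	r.SetHandler("ns1", &logHandler{})
	r.handlers["ns1"].OnEvent("created")
	r.SetHandler("ns1", nil)
	fmt.Println("handlers left:", len(r.handlers)) // handlers left: 0
}

Deleting the map entry, rather than storing a nil interface value, keeps later lookups from calling through a nil handler and keeps the map length meaningful for assertions like the one in the test above.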
// // SPDX-License-Identifier: Apache-2.0 // @@ -202,10 +202,10 @@ func (s *SQLCommon) GetTokenApprovalByID(ctx context.Context, namespace string, return s.getTokenApprovalPred(ctx, localID.String(), sq.Eq{"local_id": localID, "namespace": namespace}) } -func (s *SQLCommon) GetTokenApprovalByProtocolID(ctx context.Context, namespace, connector, protocolID string) (*core.TokenApproval, error) { +func (s *SQLCommon) GetTokenApprovalByProtocolID(ctx context.Context, namespace string, poolID *fftypes.UUID, protocolID string) (*core.TokenApproval, error) { return s.getTokenApprovalPred(ctx, protocolID, sq.And{ sq.Eq{"namespace": namespace}, - sq.Eq{"connector": connector}, + sq.Eq{"pool_id": poolID}, sq.Eq{"protocol_id": protocolID}, }) } @@ -232,7 +232,7 @@ func (s *SQLCommon) GetTokenApprovals(ctx context.Context, namespace string, fil approvals = append(approvals, d) } - return approvals, s.QueryRes(ctx, tokenapprovalTable, tx, fop, fi), err + return approvals, s.QueryRes(ctx, tokenapprovalTable, tx, fop, nil, fi), err } func (s *SQLCommon) UpdateTokenApprovals(ctx context.Context, filter ffapi.Filter, update ffapi.Update) (err error) { @@ -259,3 +259,21 @@ func (s *SQLCommon) UpdateTokenApprovals(ctx context.Context, filter ffapi.Filte return s.CommitTx(ctx, tx, autoCommit) } + +func (s *SQLCommon) DeleteTokenApprovals(ctx context.Context, namespace string, poolID *fftypes.UUID) error { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) + if err != nil { + return err + } + defer s.RollbackTx(ctx, tx, autoCommit) + + err = s.DeleteTx(ctx, tokenapprovalTable, tx, sq.Delete(tokenapprovalTable).Where(sq.Eq{ + "namespace": namespace, + "pool_id": poolID, + }), nil) + if err != nil && err != fftypes.DeleteRecordNotFound { + return err + } + + return s.CommitTx(ctx, tx, autoCommit) +} diff --git a/internal/database/sqlcommon/tokenapproval_sql_test.go b/internal/database/sqlcommon/tokenapproval_sql_test.go index 39e112808b..fcb6122e06 100644 --- a/internal/database/sqlcommon/tokenapproval_sql_test.go +++ b/internal/database/sqlcommon/tokenapproval_sql_test.go @@ -79,7 +79,7 @@ func TestApprovalE2EWithDB(t *testing.T) { assert.Equal(t, string(approvalJson), string(approvalReadJson)) // Query back token approval by protocol ID - approvalRead, err = s.GetTokenApprovalByProtocolID(ctx, "ns1", approval.Connector, approval.ProtocolID) + approvalRead, err = s.GetTokenApprovalByProtocolID(ctx, "ns1", approval.Pool, approval.ProtocolID) assert.NoError(t, err) assert.NotNil(t, approvalRead) approvalReadJson, _ = json.Marshal(&approvalRead) @@ -119,6 +119,10 @@ func TestApprovalE2EWithDB(t *testing.T) { approvalJson, _ = json.Marshal(&approval) approvalReadJson, _ = json.Marshal(&approvalRead) assert.Equal(t, string(approvalJson), string(approvalReadJson)) + + // Delete the token approval + err = s.DeleteTokenApprovals(ctx, "ns1", approval.Pool) + assert.NoError(t, err) } func TestUpsertApprovalFailBegin(t *testing.T) { @@ -259,3 +263,21 @@ func TestUpdateApprovalsUpdateFail(t *testing.T) { err := s.UpdateTokenApprovals(context.Background(), f, u) assert.Regexp(t, "FF00178", err) } + +func TestDeleteTokenApprovalsFailBegin(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + err := s.DeleteTokenApprovals(context.Background(), "ns1", fftypes.NewUUID()) + assert.Regexp(t, "FF00175", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestDeleteTokenApprovalsFailDelete(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin() 
+ mock.ExpectExec("DELETE .*").WillReturnError(fmt.Errorf("pop")) + mock.ExpectRollback() + err := s.DeleteTokenApprovals(context.Background(), "ns1", fftypes.NewUUID()) + assert.Regexp(t, "FF00179", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} diff --git a/internal/database/sqlcommon/tokenbalance_sql.go b/internal/database/sqlcommon/tokenbalance_sql.go index 73945c354b..79834117c4 100644 --- a/internal/database/sqlcommon/tokenbalance_sql.go +++ b/internal/database/sqlcommon/tokenbalance_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -201,7 +201,7 @@ func (s *SQLCommon) GetTokenBalances(ctx context.Context, namespace string, filt accounts = append(accounts, d) } - return accounts, s.QueryRes(ctx, tokenbalanceTable, tx, fop, fi), err + return accounts, s.QueryRes(ctx, tokenbalanceTable, tx, fop, nil, fi), err } func (s *SQLCommon) GetTokenAccounts(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.TokenAccount, *ffapi.FilterResult, error) { @@ -229,7 +229,7 @@ func (s *SQLCommon) GetTokenAccounts(ctx context.Context, namespace string, filt accounts = append(accounts, &account) } - return accounts, s.QueryRes(ctx, tokenbalanceTable, tx, fop, fi), err + return accounts, s.QueryRes(ctx, tokenbalanceTable, tx, fop, nil, fi), err } func (s *SQLCommon) GetTokenAccountPools(ctx context.Context, namespace, key string, filter ffapi.Filter) ([]*core.TokenAccountPool, *ffapi.FilterResult, error) { @@ -257,5 +257,23 @@ func (s *SQLCommon) GetTokenAccountPools(ctx context.Context, namespace, key str pools = append(pools, &pool) } - return pools, s.QueryRes(ctx, tokenbalanceTable, tx, fop, fi), err + return pools, s.QueryRes(ctx, tokenbalanceTable, tx, fop, nil, fi), err +} + +func (s *SQLCommon) DeleteTokenBalances(ctx context.Context, namespace string, poolID *fftypes.UUID) error { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) + if err != nil { + return err + } + defer s.RollbackTx(ctx, tx, autoCommit) + + err = s.DeleteTx(ctx, tokenbalanceTable, tx, sq.Delete(tokenbalanceTable).Where(sq.Eq{ + "namespace": namespace, + "pool_id": poolID, + }), nil) + if err != nil && err != fftypes.DeleteRecordNotFound { + return err + } + + return s.CommitTx(ctx, tx, autoCommit) } diff --git a/internal/database/sqlcommon/tokenbalance_sql_test.go b/internal/database/sqlcommon/tokenbalance_sql_test.go index ea4765626e..b2bdb6cd18 100644 --- a/internal/database/sqlcommon/tokenbalance_sql_test.go +++ b/internal/database/sqlcommon/tokenbalance_sql_test.go @@ -131,6 +131,10 @@ func TestTokenBalanceE2EWithDB(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(pools)) assert.Equal(t, *transfer.Pool, *pools[0].Pool) + + // Delete the token balances + err = s.DeleteTokenBalances(ctx, "ns1", transfer.Pool) + assert.NoError(t, err) } func TestUpdateTokenBalancesFailBegin(t *testing.T) { @@ -285,3 +289,21 @@ func TestGetTokenAccountPoolsScanFail(t *testing.T) { assert.Regexp(t, "FF10121", err) assert.NoError(t, mock.ExpectationsWereMet()) } + +func TestDeleteTokenBalancesFailBegin(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + err := s.DeleteTokenBalances(context.Background(), "ns1", fftypes.NewUUID()) + assert.Regexp(t, "FF00175", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestDeleteTokenBalancesFailDelete(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin() + mock.ExpectExec("DELETE 
.*").WillReturnError(fmt.Errorf("pop")) + mock.ExpectRollback() + err := s.DeleteTokenBalances(context.Background(), "ns1", fftypes.NewUUID()) + assert.Regexp(t, "FF00179", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} diff --git a/internal/database/sqlcommon/tokenpool_sql.go b/internal/database/sqlcommon/tokenpool_sql.go index 9cc5adb2ff..fe52732132 100644 --- a/internal/database/sqlcommon/tokenpool_sql.go +++ b/internal/database/sqlcommon/tokenpool_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -21,6 +21,7 @@ import ( "database/sql" sq "github.com/Masterminds/squirrel" + "github.com/hyperledger/firefly-common/pkg/dbsql" "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" @@ -35,6 +36,7 @@ var ( "id", "namespace", "name", + "network_name", "standard", "locator", "type", @@ -42,7 +44,7 @@ var ( "symbol", "decimals", "message_id", - "state", + "active", "created", "tx_type", "tx_id", @@ -50,113 +52,175 @@ var ( "interface", "interface_format", "methods", + "published", + "plugin_data", } tokenPoolFilterFieldMap = map[string]string{ "message": "message_id", "tx.type": "tx_type", "tx.id": "tx_id", "interfaceformat": "interface_format", + "networkname": "network_name", } ) const tokenpoolTable = "tokenpool" -func (s *SQLCommon) UpsertTokenPool(ctx context.Context, pool *core.TokenPool) (err error) { +func (s *SQLCommon) attemptTokenPoolUpdate(ctx context.Context, tx *dbsql.TXWrapper, pool *core.TokenPool) (int64, error) { + var interfaceID *fftypes.UUID + if pool.Interface != nil { + interfaceID = pool.Interface.ID + } + var networkName *string + if pool.NetworkName != "" { + networkName = &pool.NetworkName + } + return s.UpdateTx(ctx, tokenpoolTable, tx, + sq.Update(tokenpoolTable). + Set("name", pool.Name). + Set("network_name", networkName). + Set("standard", pool.Standard). + Set("locator", pool.Locator). + Set("type", pool.Type). + Set("connector", pool.Connector). + Set("symbol", pool.Symbol). + Set("decimals", pool.Decimals). + Set("message_id", pool.Message). + Set("active", pool.Active). + Set("tx_type", pool.TX.Type). + Set("tx_id", pool.TX.ID). + Set("info", pool.Info). + Set("interface", interfaceID). + Set("interface_format", pool.InterfaceFormat). + Set("methods", pool.Methods). + Set("published", pool.Published). + Set("plugin_data", pool.PluginData). 
+ Where(sq.Eq{"id": pool.ID}), + func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenPools, core.ChangeEventTypeUpdated, pool.Namespace, pool.ID) + }, + ) +} + +func (s *SQLCommon) setTokenPoolInsertValues(query sq.InsertBuilder, pool *core.TokenPool, created *fftypes.FFTime) sq.InsertBuilder { + var interfaceID *fftypes.UUID + if pool.Interface != nil { + interfaceID = pool.Interface.ID + } + var networkName *string + if pool.NetworkName != "" { + networkName = &pool.NetworkName + } + return query.Values( + pool.ID, + pool.Namespace, + pool.Name, + networkName, + pool.Standard, + pool.Locator, + pool.Type, + pool.Connector, + pool.Symbol, + pool.Decimals, + pool.Message, + pool.Active, + created, + pool.TX.Type, + pool.TX.ID, + pool.Info, + interfaceID, + pool.InterfaceFormat, + pool.Methods, + pool.Published, + pool.PluginData, + ) +} + +func (s *SQLCommon) attemptTokenPoolInsert(ctx context.Context, tx *dbsql.TXWrapper, pool *core.TokenPool, requestConflictEmptyResult bool) error { + created := fftypes.Now() + _, err := s.InsertTxExt(ctx, tokenpoolTable, tx, + s.setTokenPoolInsertValues(sq.Insert(tokenpoolTable).Columns(tokenPoolColumns...), pool, created), + func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenPools, core.ChangeEventTypeCreated, pool.Namespace, pool.ID) + }, requestConflictEmptyResult) + if err == nil { + pool.Created = created + } + return err +} + +func (s *SQLCommon) tokenPoolExists(ctx context.Context, tx *dbsql.TXWrapper, pool *core.TokenPool) (bool, error) { + rows, _, err := s.QueryTx(ctx, tokenpoolTable, tx, + sq.Select("id").From(tokenpoolTable).Where(sq.And{ + sq.Eq{"namespace": pool.Namespace}, + sq.Or{ + sq.Eq{"name": pool.Name}, + sq.Eq{"network_name": pool.NetworkName}, + }, + }), + ) + if err != nil { + return false, err + } + defer rows.Close() + return rows.Next(), nil +} + +func (s *SQLCommon) InsertOrGetTokenPool(ctx context.Context, pool *core.TokenPool) (existing *core.TokenPool, err error) { ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) if err != nil { - return err + return nil, err } defer s.RollbackTx(ctx, tx, autoCommit) - rows, _, err := s.QueryTx(ctx, tokenpoolTable, tx, - sq.Select("id"). - From(tokenpoolTable). 
- Where(sq.And{ - sq.Eq{"namespace": pool.Namespace}, - sq.Or{ - sq.Eq{"name": pool.Name}, - sq.Eq{ - "connector": pool.Connector, - "locator": pool.Locator, - }, - }, - }), - ) + insertErr := s.attemptTokenPoolInsert(ctx, tx, pool, true /* we want a failure here we can progress past */) + if insertErr == nil { + return nil, s.CommitTx(ctx, tx, autoCommit) + } + + // Do a select within the transaction to determine if the pool already exists + existing, queryErr := s.getTokenPoolPred(ctx, pool.Namespace+":"+pool.Name, sq.And{ + sq.Eq{"namespace": pool.Namespace}, + sq.Or{ + sq.Eq{"id": pool.ID}, + sq.Eq{"name": pool.Name}, + sq.Eq{"network_name": pool.NetworkName}, + }, + }) + if queryErr != nil || existing != nil { + return existing, queryErr + } + + // Error was apparently not an index conflict - must have been something else + return nil, insertErr +} + +func (s *SQLCommon) UpsertTokenPool(ctx context.Context, pool *core.TokenPool, optimization database.UpsertOptimization) (err error) { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) if err != nil { return err } - existing := rows.Next() - - if existing { - var id fftypes.UUID - _ = rows.Scan(&id) - if pool.ID != nil && *pool.ID != id { - rows.Close() - return database.IDMismatch - } - pool.ID = &id // Update on returned object - } - rows.Close() + defer s.RollbackTx(ctx, tx, autoCommit) - var interfaceID *fftypes.UUID - if pool.Interface != nil { - interfaceID = pool.Interface.ID + optimized := false + if optimization == database.UpsertOptimizationNew { + opErr := s.attemptTokenPoolInsert(ctx, tx, pool, true /* we want a failure here we can progress past */) + optimized = opErr == nil + } else if optimization == database.UpsertOptimizationExisting { + rowsAffected, opErr := s.attemptTokenPoolUpdate(ctx, tx, pool) + optimized = opErr == nil && rowsAffected == 1 } - if existing { - if _, err = s.UpdateTx(ctx, tokenpoolTable, tx, - sq.Update(tokenpoolTable). - Set("name", pool.Name). - Set("standard", pool.Standard). - Set("locator", pool.Locator). - Set("type", pool.Type). - Set("connector", pool.Connector). - Set("symbol", pool.Symbol). - Set("decimals", pool.Decimals). - Set("message_id", pool.Message). - Set("state", pool.State). - Set("tx_type", pool.TX.Type). - Set("tx_id", pool.TX.ID). - Set("info", pool.Info). - Set("interface", interfaceID). - Set("interface_format", pool.InterfaceFormat). - Set("methods", pool.Methods). - Where(sq.Eq{"id": pool.ID}), - func() { - s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenPools, core.ChangeEventTypeUpdated, pool.Namespace, pool.ID) - }, - ); err != nil { + if !optimized { + // Do a select within the transaction to determine if the pool already exists + exists, err := s.tokenPoolExists(ctx, tx, pool) + if err != nil { return err - } - } else { - pool.Created = fftypes.Now() - if _, err = s.InsertTx(ctx, tokenpoolTable, tx, - sq.Insert(tokenpoolTable). - Columns(tokenPoolColumns...). 
- Values( - pool.ID, - pool.Namespace, - pool.Name, - pool.Standard, - pool.Locator, - pool.Type, - pool.Connector, - pool.Symbol, - pool.Decimals, - pool.Message, - pool.State, - pool.Created, - pool.TX.Type, - pool.TX.ID, - pool.Info, - interfaceID, - pool.InterfaceFormat, - pool.Methods, - ), - func() { - s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenPools, core.ChangeEventTypeCreated, pool.Namespace, pool.ID) - }, - ); err != nil { + } else if exists { + if _, err := s.attemptTokenPoolUpdate(ctx, tx, pool); err != nil { + return err + } + } else if err := s.attemptTokenPoolInsert(ctx, tx, pool, false); err != nil { return err } } @@ -167,10 +231,12 @@ func (s *SQLCommon) UpsertTokenPool(ctx context.Context, pool *core.TokenPool) ( func (s *SQLCommon) tokenPoolResult(ctx context.Context, row *sql.Rows) (*core.TokenPool, error) { pool := core.TokenPool{} iface := fftypes.FFIReference{} + var networkName *string err := row.Scan( &pool.ID, &pool.Namespace, &pool.Name, + &networkName, &pool.Standard, &pool.Locator, &pool.Type, @@ -178,7 +244,7 @@ func (s *SQLCommon) tokenPoolResult(ctx context.Context, row *sql.Rows) (*core.T &pool.Symbol, &pool.Decimals, &pool.Message, - &pool.State, + &pool.Active, &pool.Created, &pool.TX.Type, &pool.TX.ID, @@ -186,10 +252,15 @@ func (s *SQLCommon) tokenPoolResult(ctx context.Context, row *sql.Rows) (*core.T &iface.ID, &pool.InterfaceFormat, &pool.Methods, + &pool.Published, + &pool.PluginData, ) if iface.ID != nil { pool.Interface = &iface } + if networkName != nil { + pool.NetworkName = *networkName + } if err != nil { return nil, i18n.WrapError(ctx, err, coremsgs.MsgDBReadErr, tokenpoolTable) } @@ -228,12 +299,8 @@ func (s *SQLCommon) GetTokenPoolByID(ctx context.Context, namespace string, id * return s.getTokenPoolPred(ctx, id.String(), sq.Eq{"id": id, "namespace": namespace}) } -func (s *SQLCommon) GetTokenPoolByLocator(ctx context.Context, namespace, connector, locator string) (*core.TokenPool, error) { - return s.getTokenPoolPred(ctx, locator, sq.And{ - sq.Eq{"namespace": namespace}, - sq.Eq{"connector": connector}, - sq.Eq{"locator": locator}, - }) +func (s *SQLCommon) GetTokenPoolByNetworkName(ctx context.Context, namespace, networkName string) (*core.TokenPool, error) { + return s.getTokenPoolPred(ctx, networkName, sq.Eq{"namespace": namespace, "network_name": networkName}) } func (s *SQLCommon) GetTokenPools(ctx context.Context, namespace string, filter ffapi.Filter) (message []*core.TokenPool, fr *ffapi.FilterResult, err error) { @@ -258,5 +325,24 @@ func (s *SQLCommon) GetTokenPools(ctx context.Context, namespace string, filter pools = append(pools, d) } - return pools, s.QueryRes(ctx, tokenpoolTable, tx, fop, fi), err + return pools, s.QueryRes(ctx, tokenpoolTable, tx, fop, nil, fi), err +} + +func (s *SQLCommon) DeleteTokenPool(ctx context.Context, namespace string, id *fftypes.UUID) error { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) + if err != nil { + return err + } + defer s.RollbackTx(ctx, tx, autoCommit) + + err = s.DeleteTx(ctx, "tokenpool", tx, sq.Delete("tokenpool").Where(sq.Eq{ + "id": id, "namespace": namespace, + }), func() { + s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenPools, core.ChangeEventTypeDeleted, namespace, id) + }) + if err != nil { + return err + } + + return s.CommitTx(ctx, tx, autoCommit) } diff --git a/internal/database/sqlcommon/tokenpool_sql_test.go b/internal/database/sqlcommon/tokenpool_sql_test.go index 1d9d6d01d8..7e96f5466e 100644 --- 
a/internal/database/sqlcommon/tokenpool_sql_test.go +++ b/internal/database/sqlcommon/tokenpool_sql_test.go @@ -38,17 +38,18 @@ func TestTokenPoolE2EWithDB(t *testing.T) { // Create a new token pool entry poolID := fftypes.NewUUID() pool := &core.TokenPool{ - ID: poolID, - Namespace: "ns1", - Name: "my-pool", - Standard: "ERC1155", - Type: core.TokenTypeFungible, - Locator: "12345", - Connector: "erc1155", - Symbol: "COIN", - Decimals: 18, - Message: fftypes.NewUUID(), - State: core.TokenPoolStateConfirmed, + ID: poolID, + Namespace: "ns1", + Name: "my-pool", + NetworkName: "my-pool", + Standard: "ERC1155", + Type: core.TokenTypeFungible, + Locator: "12345", + Connector: "erc1155", + Symbol: "COIN", + Decimals: 18, + Message: fftypes.NewUUID(), + Active: true, TX: core.TransactionRef{ Type: core.TransactionTypeTokenPool, ID: fftypes.NewUUID(), @@ -66,10 +67,12 @@ func TestTokenPoolE2EWithDB(t *testing.T) { Return().Once() s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTokenPools, core.ChangeEventTypeUpdated, "ns1", poolID, mock.Anything). Return().Once() + s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTokenPools, core.ChangeEventTypeDeleted, "ns1", poolID, mock.Anything). + Return().Once() - err := s.UpsertTokenPool(ctx, pool) + // Insert the pool + _, err := s.InsertOrGetTokenPool(ctx, pool) assert.NoError(t, err) - assert.NotNil(t, pool.Created) poolJson, _ := json.Marshal(&pool) @@ -87,8 +90,8 @@ func TestTokenPoolE2EWithDB(t *testing.T) { poolReadJson, _ = json.Marshal(&poolRead) assert.Equal(t, string(poolJson), string(poolReadJson)) - // Query back the token pool (by locator) - poolRead, err = s.GetTokenPoolByLocator(ctx, "ns1", pool.Connector, pool.Locator) + // Query back the token pool (by network name) + poolRead, err = s.GetTokenPoolByNetworkName(ctx, pool.Namespace, pool.NetworkName) assert.NoError(t, err) assert.NotNil(t, poolRead) poolReadJson, _ = json.Marshal(&poolRead) @@ -110,10 +113,32 @@ func TestTokenPoolE2EWithDB(t *testing.T) { poolReadJson, _ = json.Marshal(pools[0]) assert.Equal(t, string(poolJson), string(poolReadJson)) + // Cannot insert again with same ID, name, or network name + existing, err := s.InsertOrGetTokenPool(ctx, &core.TokenPool{ + ID: pool.ID, + Namespace: "ns1", + }) + assert.NoError(t, err) + assert.Equal(t, pool.ID, existing.ID) + existing, err = s.InsertOrGetTokenPool(ctx, &core.TokenPool{ + ID: fftypes.NewUUID(), + Name: "my-pool", + Namespace: "ns1", + }) + assert.NoError(t, err) + assert.Equal(t, pool.ID, existing.ID) + existing, err = s.InsertOrGetTokenPool(ctx, &core.TokenPool{ + ID: fftypes.NewUUID(), + NetworkName: "my-pool", + Namespace: "ns1", + }) + assert.NoError(t, err) + assert.Equal(t, pool.ID, existing.ID) + // Update the token pool pool.Locator = "67890" pool.Type = core.TokenTypeNonFungible - err = s.UpsertTokenPool(ctx, pool) + err = s.UpsertTokenPool(ctx, pool, database.UpsertOptimizationExisting) assert.NoError(t, err) // Query back the token pool (by ID) @@ -124,30 +149,23 @@ func TestTokenPoolE2EWithDB(t *testing.T) { poolReadJson, _ = json.Marshal(&poolRead) assert.Equal(t, string(poolJson), string(poolReadJson)) - // Cannot create with new ID but different name - newPool := &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: pool.Namespace, - Name: pool.Name, - } - err = s.UpsertTokenPool(ctx, newPool) - assert.Equal(t, database.IDMismatch, err) - - // Cannot create with new ID but different locator - newPool = &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: pool.Namespace, - Connector: 
pool.Connector, - Locator: pool.Locator, - } - err = s.UpsertTokenPool(ctx, newPool) - assert.Equal(t, database.IDMismatch, err) + // Delete the token pool + err = s.DeleteTokenPool(ctx, "ns1", pool.ID) + assert.NoError(t, err) } func TestUpsertTokenPoolFailBegin(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) - err := s.UpsertTokenPool(context.Background(), &core.TokenPool{}) + err := s.UpsertTokenPool(context.Background(), &core.TokenPool{}, database.UpsertOptimizationNew) + assert.Regexp(t, "FF00175", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestInsertOrGetTokenPoolFailBegin(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + _, err := s.InsertOrGetTokenPool(context.Background(), &core.TokenPool{}) assert.Regexp(t, "FF00175", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -156,18 +174,40 @@ func TestUpsertTokenPoolFailSelect(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) - err := s.UpsertTokenPool(context.Background(), &core.TokenPool{}) + mock.ExpectRollback() + err := s.UpsertTokenPool(context.Background(), &core.TokenPool{}, database.UpsertOptimizationNew) assert.Regexp(t, "FF00176", err) assert.NoError(t, mock.ExpectationsWereMet()) } -func TestUpsertTokenPoolFailInsert(t *testing.T) { +func TestInsertOrGetTokenPoolFailSelectName(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin() + mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) + mock.ExpectRollback() + _, err := s.InsertOrGetTokenPool(context.Background(), &core.TokenPool{}) + assert.Regexp(t, "FF00176", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestInsertOrGetTokenPoolFailInsert(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() + mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{})) + mock.ExpectRollback() + _, err := s.InsertOrGetTokenPool(context.Background(), &core.TokenPool{}) + assert.Regexp(t, "FF00177", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUpsertTokenPoolFailInsert(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin() mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{})) mock.ExpectRollback() - err := s.UpsertTokenPool(context.Background(), &core.TokenPool{}) + err := s.UpsertTokenPool(context.Background(), &core.TokenPool{}, database.UpsertOptimizationNew) assert.Regexp(t, "FF00177", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -175,10 +215,11 @@ func TestUpsertTokenPoolFailInsert(t *testing.T) { func TestUpsertTokenPoolFailUpdate(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() + mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow("1")) mock.ExpectExec("UPDATE .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() - err := s.UpsertTokenPool(context.Background(), &core.TokenPool{}) + err := s.UpsertTokenPool(context.Background(), &core.TokenPool{}, database.UpsertOptimizationNew) assert.Regexp(t, "FF00178", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -186,10 +227,9 @@ func TestUpsertTokenPoolFailUpdate(t *testing.T) { func 
TestUpsertTokenPoolFailCommit(t *testing.T) {
   s, mock := newMockProvider().init()
   mock.ExpectBegin()
-  mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id"}))
   mock.ExpectExec("INSERT .*").WillReturnResult(sqlmock.NewResult(1, 1))
   mock.ExpectCommit().WillReturnError(fmt.Errorf("pop"))
-  err := s.UpsertTokenPool(context.Background(), &core.TokenPool{})
+  err := s.UpsertTokenPool(context.Background(), &core.TokenPool{}, database.UpsertOptimizationNew)
   assert.Regexp(t, "FF00180", err)
   assert.NoError(t, mock.ExpectationsWereMet())
 }
@@ -246,3 +286,21 @@ func TestGetTokenPoolsScanFail(t *testing.T) {
   assert.Regexp(t, "FF10121", err)
   assert.NoError(t, mock.ExpectationsWereMet())
 }
+
+func TestDeleteTokenPoolFailBegin(t *testing.T) {
+    s, mock := newMockProvider().init()
+    mock.ExpectBegin().WillReturnError(fmt.Errorf("pop"))
+    err := s.DeleteTokenPool(context.Background(), "ns1", fftypes.NewUUID())
+    assert.Regexp(t, "FF00175", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+}
+
+func TestDeleteTokenPoolFailDelete(t *testing.T) {
+    s, mock := newMockProvider().init()
+    mock.ExpectBegin()
+    mock.ExpectExec("DELETE .*").WillReturnError(fmt.Errorf("pop"))
+    mock.ExpectRollback()
+    err := s.DeleteTokenPool(context.Background(), "ns1", fftypes.NewUUID())
+    assert.Regexp(t, "FF00179", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+}
diff --git a/internal/database/sqlcommon/tokentransfer_sql.go b/internal/database/sqlcommon/tokentransfer_sql.go
index 4673c1f9a8..00ea407ef9 100644
--- a/internal/database/sqlcommon/tokentransfer_sql.go
+++ b/internal/database/sqlcommon/tokentransfer_sql.go
@@ -1,4 +1,4 @@
-// Copyright © 2022 Kaleido, Inc.
+// Copyright © 2024 Kaleido, Inc.
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -21,6 +21,7 @@ import (
   "database/sql"
 
   sq "github.com/Masterminds/squirrel"
+  "github.com/hyperledger/firefly-common/pkg/dbsql"
   "github.com/hyperledger/firefly-common/pkg/ffapi"
   "github.com/hyperledger/firefly-common/pkg/fftypes"
   "github.com/hyperledger/firefly-common/pkg/i18n"
@@ -69,86 +70,60 @@
 
 const tokentransferTable = "tokentransfer"
 
-func (s *SQLCommon) UpsertTokenTransfer(ctx context.Context, transfer *core.TokenTransfer) (err error) {
+func (s *SQLCommon) setTokenTransferEventInsertValues(query sq.InsertBuilder, transfer *core.TokenTransfer) sq.InsertBuilder {
+    return query.Values(
+        transfer.Type,
+        transfer.LocalID,
+        transfer.Pool,
+        transfer.TokenIndex,
+        transfer.URI,
+        transfer.Connector,
+        transfer.Namespace,
+        transfer.Key,
+        transfer.From,
+        transfer.To,
+        transfer.Amount,
+        transfer.ProtocolID,
+        transfer.Message,
+        transfer.MessageHash,
+        transfer.TX.Type,
+        transfer.TX.ID,
+        transfer.BlockchainEvent,
+        transfer.Created,
+    )
+}
+
+func (s *SQLCommon) attemptTokenTransferEventInsert(ctx context.Context, tx *dbsql.TXWrapper, transfer *core.TokenTransfer, requestConflictEmptyResult bool) (err error) {
+    _, err = s.InsertTxExt(ctx, tokentransferTable, tx,
+        s.setTokenTransferEventInsertValues(sq.Insert(tokentransferTable).Columns(tokenTransferColumns...), transfer),
+        func() {
+            s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenTransfers, core.ChangeEventTypeCreated, transfer.Namespace, transfer.LocalID)
+        }, requestConflictEmptyResult)
+    return err
+}
+
+func (s *SQLCommon) InsertOrGetTokenTransfer(ctx context.Context, transfer *core.TokenTransfer) (existing *core.TokenTransfer, err error) {
   ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx)
   if err != nil {
-    return err
+    return nil, err
   }
   defer s.RollbackTx(ctx, tx,
autoCommit) - rows, _, err := s.QueryTx(ctx, tokentransferTable, tx, - sq.Select("seq"). - From(tokentransferTable). - Where(sq.Eq{ - "protocol_id": transfer.ProtocolID, - "namespace": transfer.Namespace, - }), - ) - if err != nil { - return err - } - existing := rows.Next() - rows.Close() - - if existing { - if _, err = s.UpdateTx(ctx, tokentransferTable, tx, - sq.Update(tokentransferTable). - Set("type", transfer.Type). - Set("local_id", transfer.LocalID). - Set("pool_id", transfer.Pool). - Set("token_index", transfer.TokenIndex). - Set("uri", transfer.URI). - Set("connector", transfer.Connector). - Set("key", transfer.Key). - Set("from_key", transfer.From). - Set("to_key", transfer.To). - Set("amount", transfer.Amount). - Set("message_id", transfer.Message). - Set("message_hash", transfer.MessageHash). - Set("tx_type", transfer.TX.Type). - Set("tx_id", transfer.TX.ID). - Set("blockchain_event", transfer.BlockchainEvent). - Where(sq.Eq{"protocol_id": transfer.ProtocolID}), - func() { - s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenTransfers, core.ChangeEventTypeUpdated, transfer.Namespace, transfer.LocalID) - }, - ); err != nil { - return err - } - } else { + if transfer.Created == nil { transfer.Created = fftypes.Now() - if _, err = s.InsertTx(ctx, tokentransferTable, tx, - sq.Insert(tokentransferTable). - Columns(tokenTransferColumns...). - Values( - transfer.Type, - transfer.LocalID, - transfer.Pool, - transfer.TokenIndex, - transfer.URI, - transfer.Connector, - transfer.Namespace, - transfer.Key, - transfer.From, - transfer.To, - transfer.Amount, - transfer.ProtocolID, - transfer.Message, - transfer.MessageHash, - transfer.TX.Type, - transfer.TX.ID, - transfer.BlockchainEvent, - transfer.Created, - ), - func() { - s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenTransfers, core.ChangeEventTypeCreated, transfer.Namespace, transfer.LocalID) - }, - ); err != nil { - return err - } + } + opErr := s.attemptTokenTransferEventInsert(ctx, tx, transfer, true /* we want a failure here we can progress past */) + if opErr == nil { + return nil, s.CommitTx(ctx, tx, autoCommit) } - return s.CommitTx(ctx, tx, autoCommit) + // Do a select within the transaction to determine if the protocolID already exists + existing, err = s.GetTokenTransferByProtocolID(ctx, transfer.Namespace, transfer.Pool, transfer.ProtocolID) + if err != nil || existing != nil { + return existing, err + } + // Error was apparently not a protocolID conflict - must have been something else + return nil, opErr } func (s *SQLCommon) tokenTransferResult(ctx context.Context, row *sql.Rows) (*core.TokenTransfer, error) { @@ -207,10 +182,10 @@ func (s *SQLCommon) GetTokenTransferByID(ctx context.Context, namespace string, return s.getTokenTransferPred(ctx, localID.String(), sq.Eq{"local_id": localID, "namespace": namespace}) } -func (s *SQLCommon) GetTokenTransferByProtocolID(ctx context.Context, namespace, connector, protocolID string) (*core.TokenTransfer, error) { +func (s *SQLCommon) GetTokenTransferByProtocolID(ctx context.Context, namespace string, poolID *fftypes.UUID, protocolID string) (*core.TokenTransfer, error) { return s.getTokenTransferPred(ctx, protocolID, sq.And{ sq.Eq{"namespace": namespace}, - sq.Eq{"connector": connector}, + sq.Eq{"pool_id": poolID}, sq.Eq{"protocol_id": protocolID}, }) } @@ -237,5 +212,23 @@ func (s *SQLCommon) GetTokenTransfers(ctx context.Context, namespace string, fil transfers = append(transfers, d) } - return transfers, s.QueryRes(ctx, tokentransferTable, tx, fop, fi), err 
+ return transfers, s.QueryRes(ctx, tokentransferTable, tx, fop, nil, fi), err +} + +func (s *SQLCommon) DeleteTokenTransfers(ctx context.Context, namespace string, poolID *fftypes.UUID) error { + ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx) + if err != nil { + return err + } + defer s.RollbackTx(ctx, tx, autoCommit) + + err = s.DeleteTx(ctx, tokentransferTable, tx, sq.Delete(tokentransferTable).Where(sq.Eq{ + "namespace": namespace, + "pool_id": poolID, + }), nil) + if err != nil && err != fftypes.DeleteRecordNotFound { + return err + } + + return s.CommitTx(ctx, tx, autoCommit) } diff --git a/internal/database/sqlcommon/tokentransfer_sql_test.go b/internal/database/sqlcommon/tokentransfer_sql_test.go index 072b7487b3..7717c1527c 100644 --- a/internal/database/sqlcommon/tokentransfer_sql_test.go +++ b/internal/database/sqlcommon/tokentransfer_sql_test.go @@ -30,12 +30,7 @@ import ( "github.com/stretchr/testify/mock" ) -func TestTokenTransferE2EWithDB(t *testing.T) { - s, cleanup := newSQLiteTestProvider(t) - defer cleanup() - ctx := context.Background() - - // Create a new token transfer entry +func newTestTransfer() *core.TokenTransfer { transfer := &core.TokenTransfer{ LocalID: fftypes.NewUUID(), Type: core.TokenTransferTypeTransfer, @@ -56,14 +51,25 @@ func TestTokenTransferE2EWithDB(t *testing.T) { BlockchainEvent: fftypes.NewUUID(), } transfer.Amount.Int().SetInt64(10) + return transfer +} + +func TestTokenTransferE2EWithDB(t *testing.T) { + s, cleanup := newSQLiteTestProvider(t) + defer cleanup() + ctx := context.Background() + + // Create a new token transfer entry + transfer := newTestTransfer() s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTokenTransfers, core.ChangeEventTypeCreated, transfer.Namespace, transfer.LocalID, mock.Anything). Return().Once() s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTokenTransfers, core.ChangeEventTypeUpdated, transfer.Namespace, transfer.LocalID, mock.Anything). 
Return().Once() - err := s.UpsertTokenTransfer(ctx, transfer) + existing, err := s.InsertOrGetTokenTransfer(ctx, transfer) assert.NoError(t, err) + assert.Nil(t, existing) assert.NotNil(t, transfer.Created) transferJson, _ := json.Marshal(&transfer) @@ -76,7 +82,7 @@ func TestTokenTransferE2EWithDB(t *testing.T) { assert.Equal(t, string(transferJson), string(transferReadJson)) // Query back the token transfer (by protocol ID) - transferRead, err = s.GetTokenTransferByProtocolID(ctx, "ns1", transfer.Connector, transfer.ProtocolID) + transferRead, err = s.GetTokenTransferByProtocolID(ctx, "ns1", transfer.Pool, transfer.ProtocolID) assert.NoError(t, err) assert.NotNil(t, transferRead) transferReadJson, _ = json.Marshal(&transferRead) @@ -99,70 +105,52 @@ func TestTokenTransferE2EWithDB(t *testing.T) { transferReadJson, _ = json.Marshal(transfers[0]) assert.Equal(t, string(transferJson), string(transferReadJson)) - // Update the token transfer - transfer.Type = core.TokenTransferTypeMint - transfer.Amount.Int().SetInt64(1) - transfer.To = "0x03" - err = s.UpsertTokenTransfer(ctx, transfer) - assert.NoError(t, err) - - // Query back the token transfer (by ID) - transferRead, err = s.GetTokenTransferByID(ctx, "ns1", transfer.LocalID) + // Delete the token transfer + err = s.DeleteTokenTransfers(ctx, "ns1", transfer.Pool) assert.NoError(t, err) - assert.NotNil(t, transferRead) - transferJson, _ = json.Marshal(&transfer) - transferReadJson, _ = json.Marshal(&transferRead) - assert.Equal(t, string(transferJson), string(transferReadJson)) } -func TestUpsertTokenTransferFailBegin(t *testing.T) { +func TestInsertOrGetTokenTransferFailBegin(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) - err := s.UpsertTokenTransfer(context.Background(), &core.TokenTransfer{}) + existing, err := s.InsertOrGetTokenTransfer(context.Background(), &core.TokenTransfer{}) assert.Regexp(t, "FF00175", err) + assert.Nil(t, existing) assert.NoError(t, mock.ExpectationsWereMet()) } -func TestUpsertTokenTransferFailSelect(t *testing.T) { - s, mock := newMockProvider().init() - mock.ExpectBegin() - mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) - err := s.UpsertTokenTransfer(context.Background(), &core.TokenTransfer{}) - assert.Regexp(t, "FF00176", err) - assert.NoError(t, mock.ExpectationsWereMet()) -} - -func TestUpsertTokenTransferFailInsert(t *testing.T) { +func TestInsertOrGetTokenTransferFailSelect(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() - mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{})) mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{})) mock.ExpectRollback() - err := s.UpsertTokenTransfer(context.Background(), &core.TokenTransfer{}) + existing, err := s.InsertOrGetTokenTransfer(context.Background(), &core.TokenTransfer{}) assert.Regexp(t, "FF00177", err) + assert.Nil(t, existing) assert.NoError(t, mock.ExpectationsWereMet()) } -func TestUpsertTokenTransferFailUpdate(t *testing.T) { - s, mock := newMockProvider().init() - mock.ExpectBegin() - mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"protocolid"}).AddRow("1")) - mock.ExpectExec("UPDATE .*").WillReturnError(fmt.Errorf("pop")) - mock.ExpectRollback() - err := s.UpsertTokenTransfer(context.Background(), &core.TokenTransfer{}) - assert.Regexp(t, "FF00178", err) - assert.NoError(t, mock.ExpectationsWereMet()) -} +func 
TestInsertOrGetTokenTransferExisting(t *testing.T) {
+    s, cleanup := newSQLiteTestProvider(t)
+    defer cleanup()
+    ctx := context.Background()
 
-func TestUpsertTokenTransferFailCommit(t *testing.T) {
-    s, mock := newMockProvider().init()
-    mock.ExpectBegin()
-    mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"protocolid"}))
-    mock.ExpectExec("INSERT .*").WillReturnResult(sqlmock.NewResult(1, 1))
-    mock.ExpectCommit().WillReturnError(fmt.Errorf("pop"))
-    err := s.UpsertTokenTransfer(context.Background(), &core.TokenTransfer{})
-    assert.Regexp(t, "FF00180", err)
-    assert.NoError(t, mock.ExpectationsWereMet())
+    // Create a new token transfer entry
+    transfer := newTestTransfer()
+
+    s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTokenTransfers, core.ChangeEventTypeCreated, transfer.Namespace, transfer.LocalID, mock.Anything).
+        Return().Once()
+    s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTokenTransfers, core.ChangeEventTypeUpdated, transfer.Namespace, transfer.LocalID, mock.Anything).
+        Return().Once()
+
+    existing, err := s.InsertOrGetTokenTransfer(ctx, transfer)
+    assert.NoError(t, err)
+    assert.Nil(t, existing)
+
+    existing, err = s.InsertOrGetTokenTransfer(ctx, transfer)
+    assert.NoError(t, err)
+    assert.NotNil(t, existing)
 }
 
 func TestGetTokenTransferByIDSelectFail(t *testing.T) {
@@ -214,3 +202,21 @@ func TestGetTokenTransfersScanFail(t *testing.T) {
   assert.Regexp(t, "FF10121", err)
   assert.NoError(t, mock.ExpectationsWereMet())
 }
+
+func TestDeleteTokenTransfersFailBegin(t *testing.T) {
+    s, mock := newMockProvider().init()
+    mock.ExpectBegin().WillReturnError(fmt.Errorf("pop"))
+    err := s.DeleteTokenTransfers(context.Background(), "ns1", fftypes.NewUUID())
+    assert.Regexp(t, "FF00175", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+}
+
+func TestDeleteTokenTransfersFailDelete(t *testing.T) {
+    s, mock := newMockProvider().init()
+    mock.ExpectBegin()
+    mock.ExpectExec("DELETE .*").WillReturnError(fmt.Errorf("pop"))
+    mock.ExpectRollback()
+    err := s.DeleteTokenTransfers(context.Background(), "ns1", fftypes.NewUUID())
+    assert.Regexp(t, "FF00179", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+}
diff --git a/internal/database/sqlcommon/transaction_sql.go b/internal/database/sqlcommon/transaction_sql.go
index ac83ceabbf..647685b5be 100644
--- a/internal/database/sqlcommon/transaction_sql.go
+++ b/internal/database/sqlcommon/transaction_sql.go
@@ -1,4 +1,4 @@
-// Copyright © 2023 Kaleido, Inc.
+// Copyright © 2024 Kaleido, Inc.
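
TestInsertOrGetTokenTransferExisting above exercises the same insert-or-get flow that the new InsertOrGetTokenPool and InsertOrGetTokenTransfer functions implement: attempt an optimistic insert with conflicts tolerated, and only if it fails, re-query inside the same transaction for the clashing row and hand that back instead of an error. A rough standalone sketch of that control flow against plain database/sql, with an invented records table and simplified error handling (not the dbsql helpers used in the diff):

package insertorget

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
)

// Record is a stand-in for a row with more than one uniqueness constraint,
// such as a token pool (unique ID, name, network name) or a token transfer
// (unique protocol ID within a pool).
type Record struct {
	ID   string
	Name string
}

// InsertOrGet tries an optimistic INSERT first; on any failure it re-reads a
// clashing row inside the same transaction and hands it back to the caller.
func InsertOrGet(ctx context.Context, tx *sql.Tx, r *Record) (existing *Record, err error) {
	_, insertErr := tx.ExecContext(ctx,
		`INSERT INTO records (id, name) VALUES ($1, $2)`, r.ID, r.Name)
	if insertErr == nil {
		return nil, nil // inserted cleanly, nothing pre-existing
	}

	// The insert failed - see whether a row already clashes on a unique field.
	row := tx.QueryRowContext(ctx,
		`SELECT id, name FROM records WHERE id = $1 OR name = $2`, r.ID, r.Name)
	existing = &Record{}
	switch scanErr := row.Scan(&existing.ID, &existing.Name); {
	case scanErr == nil:
		return existing, nil // caller decides how to reconcile with the existing row
	case errors.Is(scanErr, sql.ErrNoRows):
		// Not a uniqueness conflict after all - surface the original insert failure.
		return nil, fmt.Errorf("insert failed and no conflicting row found: %w", insertErr)
	default:
		return nil, scanErr
	}
}

Returning the existing row rather than failing lets the caller decide whether the duplicate is benign, which is why the second InsertOrGetTokenTransfer call in the test above expects a non-nil existing record and no error.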
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -21,6 +21,7 @@ import (
   "database/sql"
 
   sq "github.com/Masterminds/squirrel"
+  "github.com/hyperledger/firefly-common/pkg/dbsql"
   "github.com/hyperledger/firefly-common/pkg/ffapi"
   "github.com/hyperledger/firefly-common/pkg/fftypes"
   "github.com/hyperledger/firefly-common/pkg/i18n"
@@ -57,26 +58,22 @@ func (e *IdempotencyError) Error() string {
   return e.OriginalError.Error()
 }
 
-func (s *SQLCommon) InsertTransaction(ctx context.Context, transaction *core.Transaction) (err error) {
-    ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx)
-    if err != nil {
-        return err
-    }
-    defer s.RollbackTx(ctx, tx, autoCommit)
+func (s *SQLCommon) setTransactionInsertValues(query sq.InsertBuilder, transaction *core.Transaction) sq.InsertBuilder {
+    return query.Values(
+        transaction.ID,
+        string(transaction.Type),
+        transaction.Namespace,
+        transaction.Created,
+        transaction.IdempotencyKey,
+        transaction.BlockchainIDs,
+    )
+}
 
+func (s *SQLCommon) attemptInsertTxnWithIdempotencyCheck(ctx context.Context, tx *dbsql.TXWrapper, transaction *core.Transaction) (isIdempotencyErr bool, err error) {
   transaction.Created = fftypes.Now()
   var seq int64
   if seq, err = s.InsertTxExt(ctx, transactionsTable, tx,
-    sq.Insert(transactionsTable).
-      Columns(transactionColumns...).
-      Values(
-        transaction.ID,
-        string(transaction.Type),
-        transaction.Namespace,
-        transaction.Created,
-        transaction.IdempotencyKey,
-        transaction.BlockchainIDs,
-      ),
+    s.setTransactionInsertValues(sq.Insert(transactionsTable).Columns(transactionColumns...), transaction),
     func() {
       s.callbacks.UUIDCollectionNSEvent(database.CollectionTransactions, core.ChangeEventTypeCreated, transaction.Namespace, transaction.ID)
     },
@@ -88,18 +85,84 @@ func (s *SQLCommon) InsertTransaction(ctx context.Context, transaction *core.Tra
     existing, _, _ := s.GetTransactions(ctx, transaction.Namespace, fb.Eq("idempotencykey", (string)(transaction.IdempotencyKey)))
     if len(existing) > 0 {
       newErr := &IdempotencyError{existing[0].ID, i18n.NewError(ctx, coremsgs.MsgIdempotencyKeyDuplicateTransaction, transaction.IdempotencyKey, existing[0].ID)}
-      return newErr
+      return true, newErr
     } else if err == nil {
       // If we don't have an error, and we didn't find an existing idempotency key match, then we don't know the reason for the conflict
-      return i18n.NewError(ctx, coremsgs.MsgNonIdempotencyKeyConflictTxInsert, transaction.ID, transaction.IdempotencyKey)
+      return false, i18n.NewError(ctx, coremsgs.MsgNonIdempotencyKeyConflictTxInsert, transaction.ID, transaction.IdempotencyKey)
     }
   }
+    return false, err
+  }
+    return false, nil
+}
+
+func (s *SQLCommon) InsertTransaction(ctx context.Context, transaction *core.Transaction) (err error) {
+    ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx)
+    if err != nil {
+        return err
+    }
+    defer s.RollbackTx(ctx, tx, autoCommit)
+
+    if _, err := s.attemptInsertTxnWithIdempotencyCheck(ctx, tx, transaction); err != nil {
     return err
   }
 
   return s.CommitTx(ctx, tx, autoCommit)
 }
 
+func (s *SQLCommon) InsertTransactions(ctx context.Context, txns []*core.Transaction) (err error) {
+
+    ctx, tx, autoCommit, err := s.BeginOrUseTx(ctx)
+    if err != nil {
+        return err
+    }
+    // It does not make sense to invoke this function outside of a wider transaction boundary.
+    // It relies on being able to return an idempotency error, without calling commit, after inserting some of the transactions
+    if !autoCommit {
+        s.RollbackTx(ctx, tx, autoCommit)
+        return i18n.NewError(ctx, coremsgs.MsgSQLInsertManyOutsideTransaction)
+    }
+
+    if s.Features().MultiRowInsert {
+        query := sq.Insert(transactionsTable).Columns(transactionColumns...)
+        for _, txn := range txns {
+            txn.Created = fftypes.Now()
+            query = s.setTransactionInsertValues(query, txn)
+        }
+        sequences := make([]int64, len(txns))
+
+        // Use a single multi-row insert for the transactions
+        err := s.InsertTxRows(ctx, transactionsTable, tx, query, func() {
+            for _, txn := range txns {
+                s.callbacks.UUIDCollectionNSEvent(database.CollectionTransactions, core.ChangeEventTypeCreated, txn.Namespace, txn.ID)
+            }
+        }, sequences, true /* we want the caller to be able to retry with individual upserts */)
+        if err != nil {
+            return err
+        }
+    } else {
+        // Fall back to individual inserts grouped in a TX, where, if one fails with an idempotency
+        // error, we return that error but the others still get inserted.
+        var idempotencyErr error
+        for _, txn := range txns {
+            isIdempotencyErr, txnErr := s.attemptInsertTxnWithIdempotencyCheck(ctx, tx, txn)
+            if txnErr != nil {
+                log.L(ctx).Errorf("Insert failed for tx=%s:%s idempotencyKey=%s: %s", txn.Namespace, txn.ID, txn.IdempotencyKey, txnErr)
+                if !isIdempotencyErr {
+                    return txnErr
+                }
+                idempotencyErr = txnErr
+            }
+        }
+        if idempotencyErr != nil {
+            return idempotencyErr
+        }
+    }
+
+    return nil
+
+}
+
 func (s *SQLCommon) transactionResult(ctx context.Context, row *sql.Rows) (*core.Transaction, error) {
   var transaction core.Transaction
   err := row.Scan(
@@ -163,7 +226,7 @@ func (s *SQLCommon) GetTransactions(ctx context.Context, namespace string, filte
     transactions = append(transactions, transaction)
   }
 
-  return transactions, s.QueryRes(ctx, transactionsTable, tx, fop, fi), err
+  return transactions, s.QueryRes(ctx, transactionsTable, tx, fop, nil, fi), err
 }
 
diff --git a/internal/database/sqlcommon/transaction_sql_test.go b/internal/database/sqlcommon/transaction_sql_test.go
index 4d378834bb..e28efcdc81 100644
--- a/internal/database/sqlcommon/transaction_sql_test.go
+++ b/internal/database/sqlcommon/transaction_sql_test.go
@@ -18,6 +18,7 @@ package sqlcommon
 
 import (
   "context"
+  "database/sql/driver"
   "encoding/json"
   "fmt"
   "testing"
@@ -108,6 +109,67 @@ func TestTransactionE2EWithDB(t *testing.T) {
   assert.Equal(t, (core.IdempotencyKey)("testKey"), transactions[0].IdempotencyKey)
 }
+
+func TestTransactionE2EInsertManyIdempotency(t *testing.T) {
+
+    s, cleanup := newSQLiteTestProvider(t)
+    defer cleanup()
+    ctx := context.Background()
+
+    // Create a new transaction entry
+    txns := make([]*core.Transaction, 5)
+    txnsByIdem := make(map[string]*core.Transaction)
+    for i := 0; i < len(txns); i++ {
+        idem := fmt.Sprintf("idem_%d", i)
+        txns[i] = &core.Transaction{
+            ID:             fftypes.NewUUID(),
+            Type:           core.TransactionTypeBatchPin,
+            Namespace:      "ns1",
+            BlockchainIDs:  fftypes.FFStringArray{"tx1"},
+            IdempotencyKey: core.IdempotencyKey(idem),
+        }
+        txnsByIdem[idem] = txns[i]
+    }
+
+    s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTransactions, core.ChangeEventTypeCreated, "ns1", mock.Anything, mock.Anything).Return()
+
+    // Insert one transaction directly, with a conflicting idempotency key (different UUID)
+    existingTxn := &core.Transaction{
+        ID:             fftypes.NewUUID(),
+        Type:           core.TransactionTypeBatchPin,
+        Namespace:      "ns1",
+        BlockchainIDs:  fftypes.FFStringArray{"tx1"},
+        IdempotencyKey: core.IdempotencyKey("idem_2"),
+    }
+    err := s.InsertTransaction(ctx, existingTxn)
+    assert.NoError(t, err)
+
+    // Insert the whole set
+    err = s.RunAsGroup(ctx, func(ctx context.Context) error {
+        err := s.InsertTransactions(ctx, txns)
+        assert.Regexp(t, "FF10431.*idem_2", err)
+        return nil
+    })
+    assert.NoError(t, err)
+
+    // Check we find every transaction
+    fb := database.TransactionQueryFactory.NewFilter(ctx)
+    idempotencyKeys := make([]driver.Value, len(txns))
+    for i := 0; i < len(txns); i++ {
+        idempotencyKeys[i] = fmt.Sprintf("idem_%d", i)
+    }
+    resolvedTX, _, err := s.GetTransactions(ctx, "ns1", fb.In("idempotencykey", idempotencyKeys))
+    assert.NoError(t, err)
+    assert.Len(t, resolvedTX, len(txns))
+    for _, txn := range resolvedTX {
+        if txn.IdempotencyKey == "idem_2" {
+            assert.Equal(t, existingTxn.ID, txn.ID)
+        } else {
+            assert.Equal(t, txnsByIdem[string(txn.IdempotencyKey)].ID, txn.ID)
+        }
+    }
+
+}
+
 func TestInsertTransactionFailBegin(t *testing.T) {
   s, mock := newMockProvider().init()
   mock.ExpectBegin().WillReturnError(fmt.Errorf("pop"))
@@ -228,3 +290,75 @@ func TestTransactionUpdateFail(t *testing.T) {
   err := s.UpdateTransaction(context.Background(), "ns1", fftypes.NewUUID(), u)
   assert.Regexp(t, "FF00178", err)
 }
+
+func TestInsertTransactionsBeginFail(t *testing.T) {
+    s, mock := newMockProvider().init()
+    mock.ExpectBegin().WillReturnError(fmt.Errorf("pop"))
+    err := s.InsertTransactions(context.Background(), []*core.Transaction{})
+    assert.Regexp(t, "FF00175", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+    s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertTransactionsMultiRowOK(t *testing.T) {
+    s := newMockProvider()
+    s.multiRowInsert = true
+    s.fakePSQLInsert = true
+    s, mock := s.init()
+
+    tx1 := &core.Transaction{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    tx2 := &core.Transaction{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTransactions, core.ChangeEventTypeCreated, "ns1", tx1.ID)
+    s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTransactions, core.ChangeEventTypeCreated, "ns1", tx2.ID)
+
+    mock.ExpectBegin()
+    mock.ExpectQuery("INSERT.*transactions").WillReturnRows(sqlmock.NewRows([]string{s.SequenceColumn()}).
+        AddRow(int64(1001)).
+        AddRow(int64(1002)),
+    )
+    mock.ExpectCommit()
+    err := s.RunAsGroup(context.Background(), func(ctx context.Context) error {
+        return s.InsertTransactions(ctx, []*core.Transaction{tx1, tx2})
+    })
+    assert.NoError(t, err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+    s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertTransactionsOutsideTXFail(t *testing.T) {
+    s := newMockProvider()
+    s, mock := s.init()
+    mock.ExpectBegin()
+    tx1 := &core.Transaction{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    err := s.InsertTransactions(context.Background(), []*core.Transaction{tx1})
+    assert.Regexp(t, "FF10456", err)
+}
+
+func TestInsertTransactionsMultiRowFail(t *testing.T) {
+    s := newMockProvider()
+    s.multiRowInsert = true
+    s.fakePSQLInsert = true
+    s, mock := s.init()
+    tx1 := &core.Transaction{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    mock.ExpectBegin()
+    mock.ExpectQuery("INSERT.*").WillReturnError(fmt.Errorf("pop"))
+    err := s.RunAsGroup(context.Background(), func(ctx context.Context) error {
+        return s.InsertTransactions(ctx, []*core.Transaction{tx1})
+    })
+    assert.Regexp(t, "FF00177", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+    s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertTransactionsSingleRowFail(t *testing.T) {
+    s, mock := newMockProvider().init()
+    tx1 := &core.Transaction{ID: fftypes.NewUUID(), Namespace: "ns1"}
+    mock.ExpectBegin()
+    mock.ExpectExec("INSERT.*").WillReturnError(fmt.Errorf("pop"))
+    err := s.RunAsGroup(context.Background(), func(ctx context.Context) error {
+        return s.InsertTransactions(ctx, []*core.Transaction{tx1})
+    })
+    assert.Regexp(t, "FF00177", err)
+    assert.NoError(t, mock.ExpectationsWereMet())
+    s.callbacks.AssertExpectations(t)
+}
diff --git a/internal/database/sqlcommon/verifier_sql.go b/internal/database/sqlcommon/verifier_sql.go
index c819b90326..af3dd80956 100644
--- a/internal/database/sqlcommon/verifier_sql.go
+++ b/internal/database/sqlcommon/verifier_sql.go
@@ -1,4 +1,4 @@
-// Copyright © 2022 Kaleido, Inc.
+// Copyright © 2024 Kaleido, Inc.
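
The InsertTransactions tests above cover both branches of the new batch insert: one multi-row statement where the database supports it, and a per-row fallback that records idempotency conflicts without aborting the rest of the batch. The general shape can be sketched independently of the FireFly helpers; BatchStore, ErrIdempotencyConflict and the method names below are invented for illustration:

package batch

import (
	"context"
	"errors"
	"fmt"
)

// ErrIdempotencyConflict marks a row that was already inserted under the same
// idempotency key; the batch keeps going and reports it at the end.
var ErrIdempotencyConflict = errors.New("idempotency key already used")

type Txn struct {
	ID             string
	IdempotencyKey string
}

// BatchStore is a stand-in for the SQL layer: one multi-row statement, or one
// statement per row, both running inside a caller-managed database transaction.
type BatchStore interface {
	SupportsMultiRowInsert() bool
	InsertMany(ctx context.Context, txns []*Txn) error
	InsertOne(ctx context.Context, txn *Txn) error
}

func InsertTransactions(ctx context.Context, db BatchStore, txns []*Txn) error {
	if db.SupportsMultiRowInsert() {
		// One statement for the whole batch - all or nothing.
		return db.InsertMany(ctx, txns)
	}
	// Per-row fallback: remember an idempotency clash, fail fast on anything else.
	var idempotencyErr error
	for _, txn := range txns {
		if err := db.InsertOne(ctx, txn); err != nil {
			if !errors.Is(err, ErrIdempotencyConflict) {
				return err
			}
			idempotencyErr = fmt.Errorf("tx %s: %w", txn.ID, err)
		}
	}
	return idempotencyErr
}

In the real code the batch must also run inside a caller-managed database transaction (hence the RunAsGroup wrappers in the tests above), so that the rows that did insert still commit even when an idempotency error is returned; the sketch leaves that concern with the caller.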
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -193,6 +193,6 @@ func (s *SQLCommon) GetVerifiers(ctx context.Context, namespace string, filter f
     verifiers = append(verifiers, d)
   }
 
-  return verifiers, s.QueryRes(ctx, verifiersTable, tx, fop, fi), err
+  return verifiers, s.QueryRes(ctx, verifiersTable, tx, fop, nil, fi), err
 }
 
diff --git a/internal/dataexchange/ffdx/config.go b/internal/dataexchange/ffdx/config.go
index 404d75bb8a..776cce1f4e 100644
--- a/internal/dataexchange/ffdx/config.go
+++ b/internal/dataexchange/ffdx/config.go
@@ -32,6 +32,14 @@ const (
   DataExchangeEventRetryInitialDelay = "eventRetry.initialDelay"
   DataExchangeEventRetryMaxDelay     = "eventRetry.maxDelay"
   DataExchangeEventRetryFactor       = "eventRetry.factor"
+
+  DataExchangeBackgroundStart             = "backgroundStart.enabled"
+  DataExchangeBackgroundStartInitialDelay = "backgroundStart.initialDelay"
+  DataExchangeBackgroundStartMaxDelay     = "backgroundStart.maxDelay"
+  DataExchangeBackgroundStartFactor       = "backgroundStart.factor"
+  defaultBackgroundInitialDelay           = "5s"
+  defaultBackgroundRetryFactor            = 2.0
+  defaultBackgroundMaxDelay               = "1m"
 )
 
 func (h *FFDX) InitConfig(config config.Section) {
@@ -41,4 +49,8 @@ func (h *FFDX) InitConfig(config config.Section) {
   config.AddKnownKey(DataExchangeEventRetryInitialDelay, 50*time.Millisecond)
   config.AddKnownKey(DataExchangeEventRetryMaxDelay, 30*time.Second)
   config.AddKnownKey(DataExchangeEventRetryFactor, 2.0)
+  config.AddKnownKey(DataExchangeBackgroundStart, false)
+  config.AddKnownKey(DataExchangeBackgroundStartInitialDelay, defaultBackgroundInitialDelay)
+  config.AddKnownKey(DataExchangeBackgroundStartMaxDelay, defaultBackgroundMaxDelay)
+  config.AddKnownKey(DataExchangeBackgroundStartFactor, defaultBackgroundRetryFactor)
 }
diff --git a/internal/dataexchange/ffdx/ffdx.go b/internal/dataexchange/ffdx/ffdx.go
index db4581d80e..00c63dd53a 100644
--- a/internal/dataexchange/ffdx/ffdx.go
+++ b/internal/dataexchange/ffdx/ffdx.go
@@ -1,4 +1,4 @@
-// Copyright © 2023 Kaleido, Inc.
+// Copyright © 2024 Kaleido, Inc.
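
The new backgroundStart.* keys above let the DX plugin return from Start immediately while a goroutine keeps retrying the websocket connect, backing off exponentially between the configured initial and maximum delays. A self-contained sketch of that backoff loop using only the standard library (the connect callback and the hard-coded durations are placeholders, not the firefly-common retry package):

package main

import (
	"context"
	"fmt"
	"time"
)

// backgroundStart keeps calling connect until it succeeds or ctx is cancelled,
// growing the delay by factor each attempt up to max - the same shape as
// backgroundStart.initialDelay / .factor / .maxDelay in the config above.
func backgroundStart(ctx context.Context, connect func() error, initial, max time.Duration, factor float64) {
	go func() {
		delay := initial
		for attempt := 1; ; attempt++ {
			if err := connect(); err == nil {
				return // connected - the event and ack loops would be started here
			} else {
				fmt.Printf("background start attempt %d failed: %v (retrying in %s)\n", attempt, err, delay)
			}
			select {
			case <-ctx.Done():
				return
			case <-time.After(delay):
			}
			delay = time.Duration(float64(delay) * factor)
			if delay > max {
				delay = max
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	attempts := 0
	backgroundStart(ctx, func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("connection refused")
		}
		return nil
	}, 5*time.Second, time.Minute, 2.0)
	<-ctx.Done() // a real plugin's Start() would return immediately instead of blocking
}

The defaults registered above (5s initial delay, factor 2.0, capped at 1m) correspond to the initial, factor and max parameters in the sketch.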
// // SPDX-License-Identifier: Apache-2.0 // @@ -40,18 +40,20 @@ import ( const DXIDSeparator = "/" type FFDX struct { - ctx context.Context - cancelCtx context.CancelFunc - capabilities *dataexchange.Capabilities - callbacks callbacks - client *resty.Client - wsconn wsclient.WSClient - needsInit bool - initialized bool - initMutex sync.Mutex - nodes map[string]*dxNode - ackChannel chan *ack - retry *retry.Retry + ctx context.Context + cancelCtx context.CancelFunc + capabilities *dataexchange.Capabilities + callbacks callbacks + client *resty.Client + wsconn wsclient.WSClient + needsInit bool + initialized bool + initMutex sync.Mutex + nodes map[string]*dxNode + ackChannel chan *ack + retry *retry.Retry + backgroundStart bool + backgroundRetry *retry.Retry } type dxNode struct { @@ -182,7 +184,11 @@ func (h *FFDX) Init(ctx context.Context, cancelCtx context.CancelFunc, config co return i18n.NewError(ctx, coremsgs.MsgMissingPluginConfig, "url", "dataexchange.ffdx") } - h.client, err = ffresty.New(h.ctx, config) + wsConfig, err := wsclient.GenerateConfig(ctx, config) + if err == nil { + h.client, err = ffresty.New(h.ctx, config) + } + if err != nil { return err } @@ -196,11 +202,6 @@ func (h *FFDX) Init(ctx context.Context, cancelCtx context.CancelFunc, config co Factor: config.GetFloat64(DataExchangeEventRetryFactor), } - wsConfig, err := wsclient.GenerateConfig(ctx, config) - if err != nil { - return err - } - if wsConfig.WSKeyPath == "" { wsConfig.WSKeyPath = "/ws" } @@ -209,6 +210,18 @@ func (h *FFDX) Init(ctx context.Context, cancelCtx context.CancelFunc, config co if err != nil { return err } + + h.backgroundStart = config.GetBool(DataExchangeBackgroundStart) + + if h.backgroundStart { + h.backgroundRetry = &retry.Retry{ + InitialDelay: config.GetDuration(DataExchangeBackgroundStartInitialDelay), + MaximumDelay: config.GetDuration(DataExchangeBackgroundStartMaxDelay), + Factor: config.GetFloat64(DataExchangeBackgroundStartFactor), + } + return nil + } + go h.eventLoop() go h.ackLoop() return nil @@ -218,16 +231,41 @@ func (h *FFDX) SetHandler(networkNamespace, nodeName string, handler dataexchang h.callbacks.writeLock.Lock() defer h.callbacks.writeLock.Unlock() key := networkNamespace + ":" + nodeName - h.callbacks.handlers[key] = handler + if handler == nil { + delete(h.callbacks.handlers, key) + } else { + h.callbacks.handlers[key] = handler + } } func (h *FFDX) SetOperationHandler(namespace string, handler core.OperationCallbacks) { h.callbacks.writeLock.Lock() defer h.callbacks.writeLock.Unlock() - h.callbacks.opHandlers[namespace] = handler + if handler == nil { + delete(h.callbacks.opHandlers, namespace) + } else { + h.callbacks.opHandlers[namespace] = handler + } +} + +func (h *FFDX) backgroundStartLoop() { + _ = h.backgroundRetry.Do(h.ctx, fmt.Sprintf("Background start %s", h.Name()), func(attempt int) (retry bool, err error) { + err = h.wsconn.Connect() + if err != nil { + return true, err + } + + go h.eventLoop() + go h.ackLoop() + return false, nil + }) } func (h *FFDX) Start() error { + if h.backgroundStart { + go h.backgroundStartLoop() + return nil + } return h.wsconn.Connect() } @@ -235,7 +273,7 @@ func (h *FFDX) Capabilities() *dataexchange.Capabilities { return h.capabilities } -func (h *FFDX) beforeConnect(ctx context.Context) error { +func (h *FFDX) beforeConnect(ctx context.Context, w wsclient.WSClient) error { h.initMutex.Lock() defer h.initMutex.Unlock() diff --git a/internal/dataexchange/ffdx/ffdx_test.go b/internal/dataexchange/ffdx/ffdx_test.go index 
4693876d6b..63efbcd8d0 100644 --- a/internal/dataexchange/ffdx/ffdx_test.go +++ b/internal/dataexchange/ffdx/ffdx_test.go @@ -28,7 +28,9 @@ import ( "github.com/hyperledger/firefly-common/pkg/config" "github.com/hyperledger/firefly-common/pkg/ffresty" + "github.com/hyperledger/firefly-common/pkg/fftls" "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly-common/pkg/retry" "github.com/hyperledger/firefly-common/pkg/wsclient" "github.com/hyperledger/firefly/internal/coreconfig" "github.com/hyperledger/firefly/mocks/coremocks" @@ -116,6 +118,19 @@ func TestInitBadURL(t *testing.T) { assert.Regexp(t, "FF00149", err) } +func TestInitBadTLS(t *testing.T) { + coreconfig.Reset() + h := &FFDX{} + h.InitConfig(utConfig) + utConfig.Set(ffresty.HTTPConfigURL, "http://localhost:12345") + tlsConfig := utConfig.SubSection("tls") + tlsConfig.Set(fftls.HTTPConfTLSEnabled, true) + tlsConfig.Set(fftls.HTTPConfTLSCAFile, "badCA") + ctx, cancel := context.WithCancel(context.Background()) + err := h.Init(ctx, cancel, utConfig) + assert.Regexp(t, "FF00153", err) +} + func TestInitMissingURL(t *testing.T) { coreconfig.Reset() h := &FFDX{} @@ -137,6 +152,19 @@ func acker() func(args mock.Arguments) { } } +func TestInitWithBackgroundStart(t *testing.T) { + h, _, _, _, done := newTestFFDX(t, false) + defer done() + utConfig.Set(DataExchangeBackgroundStart, true) + + h.InitConfig(utConfig) + ctx, cancel := context.WithCancel(context.Background()) + err := h.Init(ctx, cancel, utConfig) + assert.NoError(t, err) + + assert.NotNil(t, h.backgroundRetry) +} + func manifestAcker(manifest string) func(args mock.Arguments) { return func(args mock.Arguments) { args[1].(dataexchange.DXEvent).AckWithManifest(manifest) @@ -408,6 +436,107 @@ func TestBadEvents(t *testing.T) { } +func TestBackgroundStartWSFail(t *testing.T) { + h := &FFDX{initialized: true} + coreconfig.Reset() + + u, _ := url.Parse("http://localhost:12345") + u.Scheme = "http" + httpURL := u.String() + + h.InitConfig(utConfig) + + utConfig.Set(ffresty.HTTPConfigURL, httpURL) + utConfig.Set(wsclient.WSConfigKeyInitialConnectAttempts, 1) + utConfig.Set(DataExchangeBackgroundStart, true) + h.InitConfig(utConfig) + + dxCtx, dxCancel := context.WithCancel(context.Background()) + defer dxCancel() + err := h.Init(dxCtx, dxCancel, utConfig) + assert.NoError(t, err) + assert.Equal(t, "ffdx", h.Name()) + assert.NotNil(t, h.Capabilities()) + + capturedErr := make(chan error) + h.backgroundRetry = &retry.Retry{ + ErrCallback: func(err error) { + capturedErr <- err + }, + } + + err = h.Start() + assert.NoError(t, err) + + err = <-capturedErr + assert.Regexp(t, "FF00148", err) +} + +func TestMessageEventsBackgroundStart(t *testing.T) { + + h, toServer, fromServer, _, done := newTestFFDX(t, false) + defer done() + + // Starting in background mode and making sure the event loop are started as well + // to listen to messages + utConfig.Set(DataExchangeBackgroundStart, true) + h.Init(h.ctx, h.cancelCtx, utConfig) + + mcb := &dataexchangemocks.Callbacks{} + h.SetHandler("ns1", "node1", mcb) + ocb := &coremocks.OperationCallbacks{} + h.SetOperationHandler("ns1", ocb) + h.AddNode(context.Background(), "ns1", "node1", fftypes.JSONObject{"id": "peer1"}) + + err := h.Start() + assert.NoError(t, err) + + namespacedID1 := fmt.Sprintf("ns1:%s", fftypes.NewUUID()) + ocb.On("OperationUpdate", mock.MatchedBy(func(ev *core.OperationUpdate) bool { + return ev.NamespacedOpID == namespacedID1 && + ev.Status == core.OpStatusFailed && + ev.ErrorMessage == "pop" && + 
ev.Plugin == "ffdx" + })).Run(opAcker()).Return(nil) + fromServer <- `{"id":"1","type":"message-failed","requestID":"` + namespacedID1 + `","error":"pop"}` + msg := <-toServer + assert.Equal(t, `{"action":"ack","id":"1"}`, string(msg)) + + namespacedID2 := fmt.Sprintf("ns1:%s", fftypes.NewUUID()) + ocb.On("OperationUpdate", mock.MatchedBy(func(ev *core.OperationUpdate) bool { + return ev.NamespacedOpID == namespacedID2 && + ev.Status == core.OpStatusSucceeded && + ev.Plugin == "ffdx" + })).Run(opAcker()).Return(nil) + fromServer <- `{"id":"2","type":"message-delivered","requestID":"` + namespacedID2 + `"}` + msg = <-toServer + assert.Equal(t, `{"action":"ack","id":"2"}`, string(msg)) + + namespacedID3 := fmt.Sprintf("ns1:%s", fftypes.NewUUID()) + ocb.On("OperationUpdate", mock.MatchedBy(func(ev *core.OperationUpdate) bool { + return ev.NamespacedOpID == namespacedID3 && + ev.Status == core.OpStatusSucceeded && + ev.DXManifest == `{"manifest":true}` && + ev.Output.String() == `{"signatures":"and stuff"}` && + ev.Plugin == "ffdx" + })).Run(opAcker()).Return(nil) + fromServer <- `{"id":"3","type":"message-acknowledged","requestID":"` + namespacedID3 + `","info":{"signatures":"and stuff"},"manifest":"{\"manifest\":true}"}` + msg = <-toServer + assert.Equal(t, `{"action":"ack","id":"3"}`, string(msg)) + + mcb.On("DXEvent", h, mock.MatchedBy(func(ev dataexchange.DXEvent) bool { + return ev.EventID() == "4" && + ev.Type() == dataexchange.DXEventTypeMessageReceived && + ev.MessageReceived().PeerID == "peer2" + })).Run(manifestAcker(`{"manifest":true}`)).Return(nil) + fromServer <- `{"id":"4","type":"message-received","sender":"peer2","recipient":"peer1","message":"{\"batch\":{\"namespace\":\"ns1\"}}"}` + msg = <-toServer + assert.Equal(t, `{"action":"ack","id":"4","manifest":"{\"manifest\":true}"}`, string(msg)) + + mcb.AssertExpectations(t) + ocb.AssertExpectations(t) +} + func TestMessageEvents(t *testing.T) { h, toServer, fromServer, _, done := newTestFFDX(t, false) @@ -464,6 +593,11 @@ func TestMessageEvents(t *testing.T) { msg = <-toServer assert.Equal(t, `{"action":"ack","id":"4","manifest":"{\"manifest\":true}"}`, string(msg)) + h.SetHandler("ns1", "node1", nil) + assert.Empty(t, h.callbacks.handlers) + h.SetOperationHandler("ns1", nil) + assert.Empty(t, h.callbacks.opHandlers) + mcb.AssertExpectations(t) ocb.AssertExpectations(t) } diff --git a/internal/definitions/handler_contracts.go b/internal/definitions/handler_contracts.go index 6a0b71fde2..f5f8df2a1b 100644 --- a/internal/definitions/handler_contracts.go +++ b/internal/definitions/handler_contracts.go @@ -18,6 +18,7 @@ package definitions import ( "context" + "fmt" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" @@ -27,14 +28,38 @@ import ( "github.com/hyperledger/firefly/pkg/database" ) -func (dh *definitionHandler) persistFFI(ctx context.Context, ffi *fftypes.FFI) (retry bool, err error) { - if err = dh.contracts.ResolveFFI(ctx, ffi); err != nil { - return false, i18n.WrapError(ctx, err, coremsgs.MsgDefRejectedValidateFail, "contract interface", ffi.ID) - } +func (dh *definitionHandler) persistFFI(ctx context.Context, ffi *fftypes.FFI, isAuthor bool) (retry bool, err error) { + for i := 1; ; i++ { + if err = dh.contracts.ResolveFFI(ctx, ffi); err != nil { + return false, i18n.WrapError(ctx, err, coremsgs.MsgDefRejectedValidateFail, "contract interface", ffi.ID) + } - err = dh.database.UpsertFFI(ctx, ffi) - if err != nil { - return true, err + // Check if this conflicts with an 
existing FFI + existing, err := dh.database.InsertOrGetFFI(ctx, ffi) + if err != nil { + return true, err + } + + if existing == nil { + // No conflict - new FFI was inserted successfully + break + } + + if ffi.Published { + if existing.ID.Equals(ffi.ID) { + // ID conflict - check if this matches (or should overwrite) the existing record + return dh.reconcilePublishedFFI(ctx, existing, ffi, isAuthor) + } + + if existing.Name == ffi.Name && existing.Version == ffi.Version { + // Local name conflict - generate a unique name and try again + ffi.Name = fmt.Sprintf("%s-%d", ffi.NetworkName, i) + continue + } + } + + // Any other conflict - reject + return false, i18n.NewError(ctx, coremsgs.MsgDefRejectedConflict, "contract interface", ffi.ID, existing.ID) } for _, method := range ffi.Methods { @@ -56,38 +81,120 @@ func (dh *definitionHandler) persistFFI(ctx context.Context, ffi *fftypes.FFI) ( return false, nil } -func (dh *definitionHandler) persistContractAPI(ctx context.Context, httpServerURL string, api *core.ContractAPI) (retry bool, err error) { - if err := dh.contracts.ResolveContractAPI(ctx, httpServerURL, api); err != nil { - return false, i18n.WrapError(ctx, err, coremsgs.MsgDefRejectedValidateFail, "contract API", api.ID) +func (dh *definitionHandler) reconcilePublishedFFI(ctx context.Context, existing, ffi *fftypes.FFI, isAuthor bool) (retry bool, err error) { + if existing.Message.Equals(ffi.Message) { + // Message already recorded + return false, nil } - err = dh.database.UpsertContractAPI(ctx, api) - if err != nil { - if err == database.IDMismatch { - return false, i18n.NewError(ctx, coremsgs.MsgDefRejectedIDMismatch, "contract API", api.ID) + if existing.Message == nil && isAuthor { + // FFI was previously unpublished - if it was now published by this node, upsert the new version + ffi.Name = existing.Name + if err := dh.database.UpsertFFI(ctx, ffi, database.UpsertOptimizationExisting); err != nil { + return true, err } - return true, err + return false, nil + } + + return false, i18n.NewError(ctx, coremsgs.MsgDefRejectedConflict, "contract interface", ffi.ID, existing.ID) +} + +func (dh *definitionHandler) persistContractAPI(ctx context.Context, httpServerURL string, api *core.ContractAPI, isAuthor bool) (retry bool, err error) { + l := log.L(ctx) + for i := 1; ; i++ { + if err := dh.contracts.ResolveContractAPI(ctx, httpServerURL, api); err != nil { + return false, i18n.WrapError(ctx, err, coremsgs.MsgDefRejectedValidateFail, "contract API", api.ID) + } + + // Check if this conflicts with an existing API + existing, err := dh.database.InsertOrGetContractAPI(ctx, api) + if err != nil { + l.Errorf("Failed to InsertOrGetContractAPI due to err: %+v", err) + return true, err + } + + if existing == nil { + // No conflict - new API was inserted successfully + l.Tracef("Successfully inserted the new contract API with ID %s", api.ID) + break + } + + if existing.ID.Equals(api.ID) { + // the matching record has the same ID, perform an update + l.Trace("Found an existing contract API with the same ID, reconciling the contract API") + // ID conflict - check if this matches (or should overwrite) the existing record + return dh.reconcileContractAPI(ctx, existing, api, isAuthor) + } else if api.Published { + + if existing.Name == api.Name { + l.Trace("Local name conflict, generating a unique name to retry") + // Local name conflict - generate a unique name and try again + api.Name = fmt.Sprintf("%s-%d", api.NetworkName, i) + continue + } + } + + // Any other conflict - reject + return false, 
i18n.NewError(ctx, coremsgs.MsgDefRejectedConflict, "contract API", api.ID, existing.ID) + } + return false, nil } +func (dh *definitionHandler) reconcileContractAPI(ctx context.Context, existing, api *core.ContractAPI, isAuthor bool) (retry bool, err error) { + l := log.L(ctx) + + if api.Published { + l.Trace("Reconciling a published API") + if existing.Message.Equals(api.Message) { + l.Trace("Reconciling a published API: message already recorded, no action required") + // Message already recorded + return false, nil + } + + if existing.Message == nil && isAuthor { + // API was previously unpublished - if it was now published by this node, upsert the new version + api.Name = existing.Name + l.Tracef("Reconciling a published API: update API name from '%s' to the existing name '%s'", api.Name, existing.Name) + if err := dh.database.UpsertContractAPI(ctx, api, database.UpsertOptimizationExisting); err != nil { + return true, err + } + return false, nil + } + } else { + if err := dh.database.UpsertContractAPI(ctx, api, database.UpsertOptimizationExisting); err != nil { + return true, err + } + return false, nil + } + + return false, i18n.NewError(ctx, coremsgs.MsgDefRejectedConflict, "contract API", api.ID, existing.ID) +} + func (dh *definitionHandler) handleFFIBroadcast(ctx context.Context, state *core.BatchState, msg *core.Message, data core.DataArray, tx *fftypes.UUID) (HandlerResult, error) { var ffi fftypes.FFI if valid := dh.getSystemBroadcastPayload(ctx, msg, data, &ffi); !valid { return HandlerResult{Action: core.ActionReject}, i18n.NewError(ctx, coremsgs.MsgDefRejectedBadPayload, "contract interface", msg.Header.ID) } + + org, err := dh.identity.GetRootOrgDID(ctx) + if err != nil { + return HandlerResult{Action: core.ActionRetry}, err + } + isAuthor := org == msg.Header.Author + ffi.Message = msg.Header.ID - return dh.handleFFIDefinition(ctx, state, &ffi, tx) + ffi.Name = ffi.NetworkName + ffi.Published = true + return dh.handleFFIDefinition(ctx, state, &ffi, tx, isAuthor) } -func (dh *definitionHandler) handleFFIDefinition(ctx context.Context, state *core.BatchState, ffi *fftypes.FFI, tx *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandler) handleFFIDefinition(ctx context.Context, state *core.BatchState, ffi *fftypes.FFI, tx *fftypes.UUID, isAuthor bool) (HandlerResult, error) { l := log.L(ctx) - ffi.Namespace = dh.namespace.Name - if err := ffi.Validate(ctx, true); err != nil { - return HandlerResult{Action: core.ActionReject}, i18n.WrapError(ctx, err, coremsgs.MsgDefRejectedValidateFail, "contract interface", ffi.ID) - } - if retry, err := dh.persistFFI(ctx, ffi); err != nil { + ffi.Namespace = dh.namespace.Name + if retry, err := dh.persistFFI(ctx, ffi, isAuthor); err != nil { if retry { return HandlerResult{Action: core.ActionRetry}, err } @@ -107,18 +214,24 @@ func (dh *definitionHandler) handleContractAPIBroadcast(ctx context.Context, sta if valid := dh.getSystemBroadcastPayload(ctx, msg, data, &api); !valid { return HandlerResult{Action: core.ActionReject}, i18n.NewError(ctx, coremsgs.MsgDefRejectedBadPayload, "contract API", msg.Header.ID) } + + org, err := dh.identity.GetRootOrgDID(ctx) + if err != nil { + return HandlerResult{Action: core.ActionRetry}, err + } + isAuthor := org == msg.Header.Author + api.Message = msg.Header.ID - return dh.handleContractAPIDefinition(ctx, state, "", &api, tx) + api.Name = api.NetworkName + api.Published = true + return dh.handleContractAPIDefinition(ctx, state, "", &api, tx, isAuthor) } -func (dh *definitionHandler) 
handleContractAPIDefinition(ctx context.Context, state *core.BatchState, httpServerURL string, api *core.ContractAPI, tx *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandler) handleContractAPIDefinition(ctx context.Context, state *core.BatchState, httpServerURL string, api *core.ContractAPI, tx *fftypes.UUID, isAuthor bool) (HandlerResult, error) { l := log.L(ctx) - api.Namespace = dh.namespace.Name - if err := api.Validate(ctx, true); err != nil { - return HandlerResult{Action: core.ActionReject}, i18n.WrapError(ctx, err, coremsgs.MsgDefRejectedValidateFail, "contract API", api.ID) - } - if retry, err := dh.persistContractAPI(ctx, httpServerURL, api); err != nil { + api.Namespace = dh.namespace.Name + if retry, err := dh.persistContractAPI(ctx, httpServerURL, api, isAuthor); err != nil { if retry { return HandlerResult{Action: core.ActionRetry}, err } diff --git a/internal/definitions/handler_contracts_test.go b/internal/definitions/handler_contracts_test.go index c5c1214df7..6b59ba60ba 100644 --- a/internal/definitions/handler_contracts_test.go +++ b/internal/definitions/handler_contracts_test.go @@ -23,8 +23,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/contractmocks" - "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" "github.com/stretchr/testify/assert" @@ -33,10 +31,12 @@ import ( func testFFI() *fftypes.FFI { return &fftypes.FFI{ - ID: fftypes.NewUUID(), - Namespace: "ns1", - Name: "math", - Version: "v1.0.0", + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "math", + NetworkName: "math", + Version: "v1.0.0", + Published: true, Methods: []*fftypes.FFIMethod{ { Name: "sum", @@ -91,9 +91,10 @@ func testFFI() *fftypes.FFI { func testContractAPI() *core.ContractAPI { return &core.ContractAPI{ - ID: fftypes.NewUUID(), - Namespace: "ns1", - Name: "math", + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "math", + NetworkName: "math", Interface: &fftypes.FFIReference{ ID: fftypes.NewUUID(), }, @@ -103,6 +104,7 @@ func testContractAPI() *core.ContractAPI { func TestHandleFFIBroadcastOk(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) b, err := json.Marshal(testFFI()) assert.NoError(t, err) @@ -110,48 +112,140 @@ func TestHandleFFIBroadcastOk(t *testing.T) { Value: fftypes.JSONAnyPtrBytes(b), } - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertFFI", mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertFFIMethod", mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertFFIEvent", mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertFFIError", mock.Anything, mock.Anything).Return(nil) - mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(nil, nil) + dh.mdi.On("UpsertFFIMethod", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("UpsertFFIEvent", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("UpsertFFIError", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: 
core.MessageHeader{ Tag: core.SystemTagDefineFFI, }, }, core.DataArray{data}, fftypes.NewUUID()) + assert.NoError(t, err) + assert.Equal(t, HandlerResult{Action: core.ActionConfirm}, action) + err = bs.RunFinalize(context.Background()) + assert.NoError(t, err) +} + +func TestHandleFFIBroadcastUpdate(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + ffi := testFFI() + b, err := json.Marshal(ffi) + assert.NoError(t, err) + data := &core.Data{ + Value: fftypes.JSONAnyPtrBytes(b), + } + + existing := &fftypes.FFI{ + ID: ffi.ID, + Message: ffi.Message, + } + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ + Header: core.MessageHeader{ + Tag: core.SystemTagDefineFFI, + }, + }, core.DataArray{data}, fftypes.NewUUID()) + assert.NoError(t, err) assert.Equal(t, HandlerResult{Action: core.ActionConfirm}, action) + err = bs.RunFinalize(context.Background()) + assert.NoError(t, err) +} + +func TestHandleFFIBroadcastNameExists(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + ffi := testFFI() + existing := &fftypes.FFI{ + Name: ffi.Name, + Version: ffi.Version, + } + + b, err := json.Marshal(ffi) + assert.NoError(t, err) + data := &core.Data{ + Value: fftypes.JSONAnyPtrBytes(b), + } + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.MatchedBy(func(f *fftypes.FFI) bool { + return f.Name == "math" + })).Return(existing, nil) + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.MatchedBy(func(f *fftypes.FFI) bool { + return f.Name == "math-1" + })).Return(nil, nil) + dh.mdi.On("UpsertFFIMethod", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("UpsertFFIEvent", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("UpsertFFIError", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ + Header: core.MessageHeader{ + Tag: core.SystemTagDefineFFI, + }, + }, core.DataArray{data}, fftypes.NewUUID()) assert.NoError(t, err) + assert.Equal(t, HandlerResult{Action: core.ActionConfirm}, action) + err = bs.RunFinalize(context.Background()) + assert.NoError(t, err) +} + +func TestHandleFFILocalNameExists(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + ffi := testFFI() + ffi.Published = false + existing := &fftypes.FFI{ + Name: ffi.Name, + Version: ffi.Version, + } + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.MatchedBy(func(f *fftypes.FFI) bool { + return f.Name == "math" + })).Return(existing, nil) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + + action, err := dh.handleFFIDefinition(context.Background(), &bs.BatchState, ffi, fftypes.NewUUID(), true) + assert.Regexp(t, "FF10407", err) + assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) err = bs.RunFinalize(context.Background()) assert.NoError(t, err) - mdi.AssertExpectations(t) } func TestPersistFFIValidateFFIFail(t *testing.T) { dh, _ := newTestDefinitionHandler(t) - mcm := 
dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - retry, err := dh.persistFFI(context.Background(), testFFI()) + defer dh.cleanup(t) + + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + retry, err := dh.persistFFI(context.Background(), testFFI(), true) assert.Regexp(t, "FF10403", err) assert.False(t, retry) - mcm.AssertExpectations(t) } func TestHandleFFIBroadcastReject(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - mdi := dh.database.(*databasemocks.Plugin) - mcm := dh.contracts.(*contractmocks.Manager) - mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) - mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + defer dh.cleanup(t) + action, err := dh.handleFFIBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineFFI, }, }, core.DataArray{}, fftypes.NewUUID()) + assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) bs.assertNoFinalizers() @@ -159,95 +253,149 @@ func TestHandleFFIBroadcastReject(t *testing.T) { func TestPersistFFIUpsertFFIFail(t *testing.T) { dh, _ := newTestDefinitionHandler(t) - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertFFI", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) - retry, err := dh.persistFFI(context.Background(), testFFI()) + defer dh.cleanup(t) + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + retry, err := dh.persistFFI(context.Background(), testFFI(), true) assert.Regexp(t, "pop", err) assert.True(t, retry) - mdi.AssertExpectations(t) - mcm.AssertExpectations(t) } func TestPersistFFIUpsertFFIMethodFail(t *testing.T) { dh, _ := newTestDefinitionHandler(t) - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertFFI", mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertFFIMethod", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) - retry, err := dh.persistFFI(context.Background(), testFFI()) + defer dh.cleanup(t) + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(nil, nil) + dh.mdi.On("UpsertFFIMethod", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + retry, err := dh.persistFFI(context.Background(), testFFI(), true) assert.Regexp(t, "pop", err) assert.True(t, retry) - mdi.AssertExpectations(t) - mcm.AssertExpectations(t) } func TestPersistFFIUpsertFFIEventFail(t *testing.T) { dh, _ := newTestDefinitionHandler(t) - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertFFI", mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertFFIMethod", mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertFFIEvent", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) - retry, err := dh.persistFFI(context.Background(), testFFI()) + defer dh.cleanup(t) + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(nil, nil) + dh.mdi.On("UpsertFFIMethod", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("UpsertFFIEvent", mock.Anything, 
mock.Anything).Return(fmt.Errorf("pop")) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + retry, err := dh.persistFFI(context.Background(), testFFI(), true) assert.Regexp(t, "pop", err) assert.True(t, retry) - mdi.AssertExpectations(t) - mcm.AssertExpectations(t) } func TestPersistFFIUpsertFFIErrorFail(t *testing.T) { dh, _ := newTestDefinitionHandler(t) - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertFFI", mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertFFIMethod", mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertFFIEvent", mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertFFIError", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) - retry, err := dh.persistFFI(context.Background(), testFFI()) + defer dh.cleanup(t) + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(nil, nil) + dh.mdi.On("UpsertFFIMethod", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("UpsertFFIEvent", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("UpsertFFIError", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + retry, err := dh.persistFFI(context.Background(), testFFI(), true) assert.Regexp(t, "pop", err) assert.True(t, retry) - mdi.AssertExpectations(t) - mcm.AssertExpectations(t) } -func TestHandleFFIBroadcastValidateFail(t *testing.T) { +func TestPersistFFILocalPublish(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + published := testFFI() + published.Message = fftypes.NewUUID() + existing := &fftypes.FFI{ + ID: published.ID, + } + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("UpsertFFI", mock.Anything, published, database.UpsertOptimizationExisting).Return(nil) + + retry, err := dh.persistFFI(context.Background(), published, true) + assert.NoError(t, err) + assert.False(t, retry) +} + +func TestPersistFFILocalPublishUpsertFail(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + published := testFFI() + published.Message = fftypes.NewUUID() + existing := &fftypes.FFI{ + ID: published.ID, + } + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("UpsertFFI", mock.Anything, published, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) + + retry, err := dh.persistFFI(context.Background(), published, true) + assert.EqualError(t, err, "pop") + assert.True(t, retry) +} + +func TestPersistFFIWrongMessage(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + published := testFFI() + published.Message = fftypes.NewUUID() + existing := &fftypes.FFI{ + ID: published.ID, + Message: fftypes.NewUUID(), + } + + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + + retry, err := dh.persistFFI(context.Background(), published, true) + assert.Regexp(t, "FF10407", err) + assert.False(t, retry) +} + +func TestHandleFFIBroadcastOrgFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) + ffi := testFFI() - ffi.Name = "*%^!$%^&*" b, err := json.Marshal(ffi) assert.NoError(t, err) data := &core.Data{ Value: 
fftypes.JSONAnyPtrBytes(b), } - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + + dh.mim.On("GetRootOrgDID", context.Background()).Return("", fmt.Errorf("pop")) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineFFI, }, }, core.DataArray{data}, fftypes.NewUUID()) - assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) - assert.Error(t, err) + + assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) + assert.Regexp(t, "pop", err) bs.assertNoFinalizers() } func TestHandleFFIBroadcastPersistFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) + ffi := testFFI() b, err := json.Marshal(ffi) assert.NoError(t, err) data := &core.Data{ Value: fftypes.JSONAnyPtrBytes(b), } - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertFFI", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineFFI, @@ -256,47 +404,47 @@ func TestHandleFFIBroadcastPersistFail(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) bs.assertNoFinalizers() - - mdi.AssertExpectations(t) - mcm.AssertExpectations(t) } func TestHandleFFIBroadcastResolveFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) + ffi := testFFI() b, err := json.Marshal(ffi) assert.NoError(t, err) data := &core.Data{ Value: fftypes.JSONAnyPtrBytes(b), } - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + + dh.mcm.On("ResolveFFI", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineFFI, }, }, core.DataArray{data}, fftypes.NewUUID()) + assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Regexp(t, "pop", err) bs.assertNoFinalizers() - - mcm.AssertExpectations(t) } func TestHandleContractAPIBroadcastOk(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) - b, err := json.Marshal(testFFI()) + b, err := json.Marshal(testContractAPI()) assert.NoError(t, err) data := &core.Data{ Value: fftypes.JSONAnyPtrBytes(b), } - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertContractAPI", mock.Anything, mock.Anything, mock.Anything).Return(nil) - mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(nil, nil) + dh.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + dh.mim.On("GetRootOrgDID", 
context.Background()).Return("firefly:org1", nil) action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ @@ -307,13 +455,12 @@ func TestHandleContractAPIBroadcastOk(t *testing.T) { assert.NoError(t, err) err = bs.RunFinalize(context.Background()) assert.NoError(t, err) - - mdi.AssertExpectations(t) - mcm.AssertExpectations(t) } func TestHandleContractAPIBadPayload(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) + data := &core.Data{ Value: fftypes.JSONAnyPtr("bad"), } @@ -327,79 +474,66 @@ func TestHandleContractAPIBadPayload(t *testing.T) { assert.Regexp(t, "FF10400", err) } -func TestHandleContractAPIIDMismatch(t *testing.T) { +func TestHandleContractAPIBroadcastPersistFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) - b, err := json.Marshal(testFFI()) + b, err := json.Marshal(testContractAPI()) assert.NoError(t, err) data := &core.Data{ Value: fftypes.JSONAnyPtrBytes(b), } - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertContractAPI", mock.Anything, mock.Anything, mock.Anything).Return(database.IDMismatch) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineContractAPI, }, }, core.DataArray{data}, fftypes.NewUUID()) - assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) - assert.Regexp(t, "FF10404", err) - - mdi.AssertExpectations(t) - mcm.AssertExpectations(t) -} - -func TestPersistContractAPIUpsertFail(t *testing.T) { - dh, _ := newTestDefinitionHandler(t) - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertContractAPI", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveContractAPI", context.Background(), "http://test", mock.Anything).Return(nil) - - _, err := dh.persistContractAPI(context.Background(), "http://test", testContractAPI()) + assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mdi.AssertExpectations(t) - mcm.AssertExpectations(t) + bs.assertNoFinalizers() } -func TestHandleContractAPIBroadcastValidateFail(t *testing.T) { +func TestHandleContractAPIBroadcastResolveFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - api := testContractAPI() - api.Name = "*%^!$%^&*" - b, err := json.Marshal(api) + defer dh.cleanup(t) + + b, err := json.Marshal(testContractAPI()) assert.NoError(t, err) data := &core.Data{ Value: fftypes.JSONAnyPtrBytes(b), } - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(fmt.Errorf("pop")) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineContractAPI, }, }, core.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject}, 
action) - assert.Error(t, err) + assert.Regexp(t, "pop", err) + bs.assertNoFinalizers() } -func TestHandleContractAPIBroadcastPersistFail(t *testing.T) { +func TestHandleContractAPIBroadcastOrgFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ffi := testFFI() - b, err := json.Marshal(ffi) + defer dh.cleanup(t) + + b, err := json.Marshal(testContractAPI()) assert.NoError(t, err) data := &core.Data{ Value: fftypes.JSONAnyPtrBytes(b), } - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertContractAPI", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + + dh.mim.On("GetRootOrgDID", context.Background()).Return("", fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ @@ -410,31 +544,157 @@ func TestHandleContractAPIBroadcastPersistFail(t *testing.T) { assert.Regexp(t, "pop", err) bs.assertNoFinalizers() +} - mdi.AssertExpectations(t) - mcm.AssertExpectations(t) +func TestPersistContractAPIConfirmMessage(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + api := testContractAPI() + api.Published = true + api.Message = fftypes.NewUUID() + existing := &core.ContractAPI{ + ID: api.ID, + Message: api.Message, + } + + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + + _, err := dh.persistContractAPI(context.Background(), "", api, true) + assert.NoError(t, err) } -func TestHandleContractAPIBroadcastResolveFail(t *testing.T) { - dh, bs := newTestDefinitionHandler(t) - ffi := testFFI() - b, err := json.Marshal(ffi) +func TestPersistContractAPIUpsert(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + api := testContractAPI() + api.Published = true + api.Message = fftypes.NewUUID() + existing := &core.ContractAPI{ + ID: api.ID, + } + + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + dh.mdi.On("UpsertContractAPI", context.Background(), api, database.UpsertOptimizationExisting).Return(nil) + + _, err := dh.persistContractAPI(context.Background(), "", api, true) assert.NoError(t, err) - data := &core.Data{ - Value: fftypes.JSONAnyPtrBytes(b), +} + +func TestPersistContractAPIUpsertFail(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + api := testContractAPI() + api.Published = true + api.Message = fftypes.NewUUID() + existing := &core.ContractAPI{ + ID: api.ID, } - mcm := dh.contracts.(*contractmocks.Manager) - mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ - Header: core.MessageHeader{ - Tag: core.SystemTagDefineContractAPI, - }, - }, core.DataArray{data}, fftypes.NewUUID()) - assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) - assert.Regexp(t, "pop", err) + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + dh.mdi.On("UpsertContractAPI", context.Background(), api, 
database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) - bs.assertNoFinalizers() + _, err := dh.persistContractAPI(context.Background(), "", api, true) + assert.EqualError(t, err, "pop") +} + +func TestPersistContractAPIUpsertNonPublished(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + api := testContractAPI() + api.Published = false + api.Message = fftypes.NewUUID() + existing := &core.ContractAPI{ + ID: api.ID, + } + + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + dh.mdi.On("UpsertContractAPI", context.Background(), api, database.UpsertOptimizationExisting).Return(nil) + + _, err := dh.persistContractAPI(context.Background(), "", api, true) + assert.NoError(t, err) +} + +func TestPersistContractAPIUpsertFailNonPublished(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + api := testContractAPI() + api.Published = false + api.Message = fftypes.NewUUID() + existing := &core.ContractAPI{ + ID: api.ID, + } + + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + dh.mdi.On("UpsertContractAPI", context.Background(), api, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) + + _, err := dh.persistContractAPI(context.Background(), "", api, true) + assert.EqualError(t, err, "pop") +} +func TestPersistContractAPIWrongMessage(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + api := testContractAPI() + api.Published = true + api.Message = fftypes.NewUUID() + existing := &core.ContractAPI{ + ID: api.ID, + Message: fftypes.NewUUID(), + } + + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + + _, err := dh.persistContractAPI(context.Background(), "", api, true) + assert.Regexp(t, "FF10407", err) +} + +func TestPersistContractAPINameConflict(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + api := testContractAPI() + api.Published = true + api.Message = fftypes.NewUUID() + existing := &core.ContractAPI{ + ID: fftypes.NewUUID(), + Name: api.Name, + } + + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(existing, nil).Once() + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(nil, nil).Once() + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) + + _, err := dh.persistContractAPI(context.Background(), "", api, true) + assert.NoError(t, err) + assert.Equal(t, "math-1", api.Name) +} + +func TestPersistContractAPINetworkNameConflict(t *testing.T) { + dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + + api := testContractAPI() + api.Published = true + api.Message = fftypes.NewUUID() + existing := &core.ContractAPI{ + ID: fftypes.NewUUID(), + NetworkName: api.NetworkName, + } + + dh.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(existing, nil) + dh.mcm.On("ResolveContractAPI", context.Background(), "", mock.Anything).Return(nil) - mcm.AssertExpectations(t) + _, err := dh.persistContractAPI(context.Background(), "", api, true) + assert.Regexp(t, "FF10407", err) } diff --git a/internal/definitions/handler_datatype_test.go b/internal/definitions/handler_datatype_test.go index 
69c12ad633..fc72fa1bf1 100644 --- a/internal/definitions/handler_datatype_test.go +++ b/internal/definitions/handler_datatype_test.go @@ -23,8 +23,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/databasemocks" - "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/pkg/core" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -32,6 +30,7 @@ import ( func TestHandleDefinitionBroadcastDatatypeOk(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) dt := &core.Datatype{ ID: fftypes.NewUUID(), @@ -48,12 +47,11 @@ func TestHandleDefinitionBroadcastDatatypeOk(t *testing.T) { Value: fftypes.JSONAnyPtrBytes(b), } - mdm := dh.data.(*datamocks.Manager) - mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) - mbi := dh.database.(*databasemocks.Plugin) - mbi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(nil, nil) - mbi.On("UpsertDatatype", mock.Anything, mock.Anything, false).Return(nil) - mbi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + dh.mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(nil, nil) + dh.mdi.On("UpsertDatatype", mock.Anything, mock.Anything, false).Return(nil) + dh.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineDatatype, @@ -63,13 +61,11 @@ func TestHandleDefinitionBroadcastDatatypeOk(t *testing.T) { assert.NoError(t, err) err = bs.RunFinalize(context.Background()) assert.NoError(t, err) - - mdm.AssertExpectations(t) - mbi.AssertExpectations(t) } func TestHandleDefinitionBroadcastDatatypeEventFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) dt := &core.Datatype{ ID: fftypes.NewUUID(), @@ -86,12 +82,10 @@ func TestHandleDefinitionBroadcastDatatypeEventFail(t *testing.T) { Value: fftypes.JSONAnyPtrBytes(b), } - mdm := dh.data.(*datamocks.Manager) - mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) - mbi := dh.database.(*databasemocks.Plugin) - mbi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(nil, nil) - mbi.On("UpsertDatatype", mock.Anything, mock.Anything, false).Return(nil) - mbi.On("InsertEvent", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + dh.mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(nil, nil) + dh.mdi.On("UpsertDatatype", mock.Anything, mock.Anything, false).Return(nil) + dh.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineDatatype, @@ -101,13 +95,11 @@ func TestHandleDefinitionBroadcastDatatypeEventFail(t *testing.T) { assert.NoError(t, err) err = bs.RunFinalize(context.Background()) assert.EqualError(t, err, "pop") - - mdm.AssertExpectations(t) - mbi.AssertExpectations(t) } func TestHandleDefinitionBroadcastDatatypeMissingID(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) dt := &core.Datatype{ Validator: core.ValidatorTypeJSON, @@ -135,6 +127,7 @@ func TestHandleDefinitionBroadcastDatatypeMissingID(t *testing.T) { func 
TestHandleDefinitionBroadcastBadSchema(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) dt := &core.Datatype{ ID: fftypes.NewUUID(), @@ -151,8 +144,7 @@ func TestHandleDefinitionBroadcastBadSchema(t *testing.T) { Value: fftypes.JSONAnyPtrBytes(b), } - mdm := dh.data.(*datamocks.Manager) - mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + dh.mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineDatatype, @@ -161,12 +153,12 @@ func TestHandleDefinitionBroadcastBadSchema(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mdm.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionBroadcastMissingData(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) dt := &core.Datatype{ ID: fftypes.NewUUID(), @@ -190,6 +182,7 @@ func TestHandleDefinitionBroadcastMissingData(t *testing.T) { func TestHandleDefinitionBroadcastDatatypeLookupFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) dt := &core.Datatype{ ID: fftypes.NewUUID(), @@ -206,10 +199,8 @@ func TestHandleDefinitionBroadcastDatatypeLookupFail(t *testing.T) { Value: fftypes.JSONAnyPtrBytes(b), } - mdm := dh.data.(*datamocks.Manager) - mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) - mbi := dh.database.(*databasemocks.Plugin) - mbi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(nil, fmt.Errorf("pop")) + dh.mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(nil, fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Namespace: "ns1", @@ -219,13 +210,12 @@ func TestHandleDefinitionBroadcastDatatypeLookupFail(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.EqualError(t, err, "pop") - mdm.AssertExpectations(t) - mbi.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionBroadcastUpsertFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) dt := &core.Datatype{ ID: fftypes.NewUUID(), @@ -242,11 +232,9 @@ func TestHandleDefinitionBroadcastUpsertFail(t *testing.T) { Value: fftypes.JSONAnyPtrBytes(b), } - mdm := dh.data.(*datamocks.Manager) - mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) - mbi := dh.database.(*databasemocks.Plugin) - mbi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(nil, nil) - mbi.On("UpsertDatatype", mock.Anything, mock.Anything, false).Return(fmt.Errorf("pop")) + dh.mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(nil, nil) + dh.mdi.On("UpsertDatatype", mock.Anything, mock.Anything, false).Return(fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineDatatype, @@ -255,13 +243,12 @@ func TestHandleDefinitionBroadcastUpsertFail(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.EqualError(t, err, "pop") - mdm.AssertExpectations(t) - mbi.AssertExpectations(t) bs.assertNoFinalizers() } func 
TestHandleDefinitionBroadcastDatatypeDuplicate(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) dt := &core.Datatype{ ID: fftypes.NewUUID(), @@ -278,10 +265,8 @@ func TestHandleDefinitionBroadcastDatatypeDuplicate(t *testing.T) { Value: fftypes.JSONAnyPtrBytes(b), } - mdm := dh.data.(*datamocks.Manager) - mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) - mbi := dh.database.(*databasemocks.Plugin) - mbi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(dt, nil) + dh.mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) + dh.mdi.On("GetDatatypeByName", mock.Anything, "ns1", "name1", "ver1").Return(dt, nil) action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: core.SystemTagDefineDatatype, @@ -290,7 +275,5 @@ func TestHandleDefinitionBroadcastDatatypeDuplicate(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mdm.AssertExpectations(t) - mbi.AssertExpectations(t) bs.assertNoFinalizers() } diff --git a/internal/definitions/handler_identity_claim_test.go b/internal/definitions/handler_identity_claim_test.go index 60bbaae902..17074f044c 100644 --- a/internal/definitions/handler_identity_claim_test.go +++ b/internal/definitions/handler_identity_claim_test.go @@ -23,9 +23,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/databasemocks" - "github.com/hyperledger/firefly/mocks/datamocks" - "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" "github.com/stretchr/testify/assert" @@ -141,38 +138,34 @@ func testCustomClaimAndVerification(t *testing.T) (*core.Identity, *core.Identit func TestHandleDefinitionIdentityClaimCustomWithExistingParentVerificationOk(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, verifyMsg, verifyData := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) - mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{ + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) + dh.mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{ {Header: core.MessageHeader{ID: fftypes.NewUUID(), Tag: "skipped missing data"}}, }, nil, nil) - mdi.On("UpsertIdentity", ctx, mock.MatchedBy(func(identity *core.Identity) bool { + dh.mdi.On("UpsertIdentity", ctx, mock.MatchedBy(func(identity *core.Identity) bool { assert.Equal(t, *claimMsg.Header.ID, *identity.Messages.Claim) assert.Equal(t, *verifyMsg.Header.ID, *identity.Messages.Verification) return true }), 
database.UpsertOptimizationNew).Return(nil) - mdi.On("UpsertVerifier", ctx, mock.MatchedBy(func(verifier *core.Verifier) bool { + dh.mdi.On("UpsertVerifier", ctx, mock.MatchedBy(func(verifier *core.Verifier) bool { assert.Equal(t, core.VerifierTypeEthAddress, verifier.Type) assert.Equal(t, "0x12345", verifier.Value) assert.Equal(t, *custom1.ID, *verifier.Identity) return true }), database.UpsertOptimizationNew).Return(nil) - mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + dh.mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { return event.Type == core.EventTypeIdentityConfirmed })).Return(nil) - - mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, false, nil).Once() - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, true, nil) + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, false, nil).Once() + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, true, nil) dh.multiparty = true @@ -185,26 +178,19 @@ func TestHandleDefinitionIdentityClaimCustomWithExistingParentVerificationOk(t * err = bs.RunFinalize(ctx) assert.NoError(t, err) - - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) - mim.AssertExpectations(t) - } func TestHandleDefinitionIdentityClaimIdempotentReplay(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, verifyMsg, verifyData := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(custom1, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(&core.Verifier{ + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(custom1, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(&core.Verifier{ Identity: custom1.ID, Namespace: "ns1", VerifierRef: core.VerifierRef{ @@ -212,16 +198,14 @@ func TestHandleDefinitionIdentityClaimIdempotentReplay(t *testing.T) { Value: "0x12345", }, }, nil) - mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{ + dh.mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{ {Header: core.MessageHeader{ID: fftypes.NewUUID(), Tag: "skipped missing data"}}, }, nil, nil) - mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + dh.mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { return event.Type == core.EventTypeIdentityConfirmed })).Return(nil) - - mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, false, nil).Once() - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, true, nil) + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, false, nil).Once() + 
dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, true, nil) dh.multiparty = true @@ -233,31 +217,23 @@ func TestHandleDefinitionIdentityClaimIdempotentReplay(t *testing.T) { err = bs.RunFinalize(ctx) assert.NoError(t, err) - - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) } func TestHandleDefinitionIdentityClaimFailInsertIdentity(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, verifyMsg, verifyData := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) - mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) - mdi.On("UpsertVerifier", ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) - mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) - - mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, true, nil) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) + dh.mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) + dh.mdi.On("UpsertVerifier", ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) + dh.mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, true, nil) dh.multiparty = true @@ -267,29 +243,22 @@ func TestHandleDefinitionIdentityClaimFailInsertIdentity(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimVerificationDataFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, verifyMsg, _ := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) - mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) - - mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) + dh.mim.On("VerifyIdentityChain", ctx, 
custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) + dh.mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) dh.multiparty = true @@ -299,29 +268,22 @@ func TestHandleDefinitionIdentityClaimVerificationDataFail(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimVerificationMissingData(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, verifyMsg, _ := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) - mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) - - mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{}, true, nil) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) + dh.mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{}, true, nil) dh.multiparty = true @@ -331,30 +293,23 @@ func TestHandleDefinitionIdentityClaimVerificationMissingData(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionConfirm}, action) assert.NoError(t, err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimFailInsertVerifier(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, verifyMsg, verifyData := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) - mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) - mdi.On("UpsertVerifier", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) - - mdm := 
dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, true, nil) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) + dh.mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) + dh.mdi.On("UpsertVerifier", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{verifyData}, true, nil) dh.multiparty = true @@ -364,26 +319,21 @@ func TestHandleDefinitionIdentityClaimFailInsertVerifier(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimCustomMissingParentVerificationOk(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) - mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) + dh.mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return([]*core.Message{}, nil, nil) dh.multiparty = true @@ -391,25 +341,21 @@ func TestHandleDefinitionIdentityClaimCustomMissingParentVerificationOk(t *testi assert.Equal(t, HandlerResult{Action: core.ActionConfirm}, action) // Just wait for the verification to come in later assert.NoError(t, err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimCustomParentVerificationFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) - mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + dh.mim.On("VerifyIdentityChain", ctx, 
custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) + dh.mdi.On("GetMessages", ctx, "ns1", mock.Anything).Return(nil, nil, fmt.Errorf("pop")) dh.multiparty = true @@ -417,24 +363,20 @@ func TestHandleDefinitionIdentityClaimCustomParentVerificationFail(t *testing.T) assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimVerifierClash(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(&core.Verifier{ + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(&core.Verifier{ Hash: fftypes.NewRandB32(), }, nil) @@ -444,24 +386,20 @@ func TestHandleDefinitionIdentityClaimVerifierClash(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimVerifierError(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, fmt.Errorf("pop")) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, fmt.Errorf("pop")) dh.multiparty = true @@ -469,22 +407,18 @@ func TestHandleDefinitionIdentityClaimVerifierError(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimIdentityClash(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := 
context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(&core.Identity{ + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(&core.Identity{ IdentityBase: core.IdentityBase{ ID: fftypes.NewUUID(), }, @@ -496,23 +430,19 @@ func TestHandleDefinitionIdentityClaimIdentityClash(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimIdentityError(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, fmt.Errorf("pop")) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, fmt.Errorf("pop")) dh.multiparty = true @@ -520,20 +450,18 @@ func TestHandleDefinitionIdentityClaimIdentityError(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityMissingAuthor(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) claimMsg.Header.Author = "" - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) dh.multiparty = true @@ -541,19 +469,18 @@ func TestHandleDefinitionIdentityMissingAuthor(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimBadSignature(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) claimMsg.Header.Author = org1.DID // should be the child for the claim - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) dh.multiparty = true @@ -561,50 +488,48 @@ func TestHandleDefinitionIdentityClaimBadSignature(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, 
err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityVerifyChainFail(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) claimMsg.Header.Author = org1.DID // should be the child for the claim - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(nil, true, fmt.Errorf("pop")) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(nil, true, fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, claimMsg, core.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityVerifyChainInvalid(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() custom1, org1, claimMsg, claimData, _, _ := testCustomClaimAndVerification(t) claimMsg.Header.Author = org1.DID // should be the child for the claim - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, custom1).Return(nil, false, fmt.Errorf("wrong")) + dh.mim.On("VerifyIdentityChain", ctx, custom1).Return(nil, false, fmt.Errorf("wrong")) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, claimMsg, core.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionWait}, action) assert.NoError(t, err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionIdentityClaimBadData(t *testing.T) { dh, bs := newTestDefinitionHandler(t) - ctx := context.Background() + defer dh.cleanup(t) + ctx := context.Background() _, org1, claimMsg, _, _, _ := testCustomClaimAndVerification(t) claimMsg.Header.Author = org1.DID // should be the child for the claim diff --git a/internal/definitions/handler_identity_update_test.go b/internal/definitions/handler_identity_update_test.go index 651d862a9d..7b95615518 100644 --- a/internal/definitions/handler_identity_update_test.go +++ b/internal/definitions/handler_identity_update_test.go @@ -23,8 +23,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/databasemocks" - "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" "github.com/stretchr/testify/assert" @@ -73,17 +71,14 @@ func TestHandleDefinitionIdentityUpdateOk(t *testing.T) { org1, updateMsg, updateData, iu := testIdentityUpdate(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertIdentity", ctx, mock.MatchedBy(func(identity *core.Identity) bool { + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mdi.On("UpsertIdentity", ctx, mock.MatchedBy(func(identity *core.Identity) bool { assert.Equal(t, *updateMsg.Header.ID, *identity.Messages.Update) assert.Equal(t, org1.IdentityBase, identity.IdentityBase) assert.Equal(t, iu.Updates, identity.IdentityProfile) return true }), database.UpsertOptimizationExisting).Return(nil) - mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + dh.mdi.On("InsertEvent", mock.Anything, 
mock.MatchedBy(func(event *core.Event) bool { return event.Type == core.EventTypeIdentityUpdated })).Return(nil) @@ -93,9 +88,6 @@ func TestHandleDefinitionIdentityUpdateOk(t *testing.T) { err = bs.RunFinalize(ctx) assert.NoError(t, err) - - mim.AssertExpectations(t) - mdi.AssertExpectations(t) } func TestHandleDefinitionIdentityUpdateUpsertFail(t *testing.T) { @@ -104,18 +96,13 @@ func TestHandleDefinitionIdentityUpdateUpsertFail(t *testing.T) { org1, updateMsg, updateData, _ := testIdentityUpdate(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, updateMsg, core.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } @@ -127,15 +114,13 @@ func TestHandleDefinitionIdentityInvalidIdentity(t *testing.T) { org1, updateMsg, updateData, _ := testIdentityUpdate(t) updateMsg.Header.Author = "wrong" - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(nil, false, nil) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(nil, false, nil) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, updateMsg, core.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } @@ -147,15 +132,13 @@ func TestHandleDefinitionVerifyFail(t *testing.T) { org1, updateMsg, updateData, _ := testIdentityUpdate(t) updateMsg.Header.Author = "wrong" - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(nil, true, fmt.Errorf("pop")) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(nil, true, fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, updateMsg, core.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Error(t, err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } @@ -167,15 +150,13 @@ func TestHandleDefinitionVerifyWait(t *testing.T) { org1, updateMsg, updateData, _ := testIdentityUpdate(t) updateMsg.Header.Author = "wrong" - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, updateMsg, core.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, 
HandlerResult{Action: core.ActionWait}, action) assert.NoError(t, err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } @@ -185,14 +166,12 @@ func TestHandleDefinitionIdentityNotFound(t *testing.T) { org1, updateMsg, updateData, _ := testIdentityUpdate(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, nil) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, nil) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, updateMsg, core.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Regexp(t, "FF10408", err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } @@ -202,14 +181,12 @@ func TestHandleDefinitionIdentityLookupFail(t *testing.T) { org1, updateMsg, updateData, _ := testIdentityUpdate(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, fmt.Errorf("pop")) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, updateMsg, core.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } diff --git a/internal/definitions/handler_identity_verification_test.go b/internal/definitions/handler_identity_verification_test.go index 70d97344b5..51a3e0c120 100644 --- a/internal/definitions/handler_identity_verification_test.go +++ b/internal/definitions/handler_identity_verification_test.go @@ -23,9 +23,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/databasemocks" - "github.com/hyperledger/firefly/mocks/datamocks" - "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" "github.com/stretchr/testify/assert" @@ -38,32 +35,28 @@ func TestHandleDefinitionIdentityVerificationWithExistingClaimOk(t *testing.T) { custom1, org1, claimMsg, claimData, verifyMsg, verifyData := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(custom1, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(nil, nil) // Simulate pending confirm in same pin batch - mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) - mdi.On("UpsertIdentity", ctx, mock.MatchedBy(func(identity *core.Identity) bool { + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(custom1, false, nil) + dh.mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(nil, nil) // Simulate pending confirm in same pin batch + dh.mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", custom1.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) + dh.mdi.On("UpsertIdentity", ctx, 
mock.MatchedBy(func(identity *core.Identity) bool { assert.Equal(t, *claimMsg.Header.ID, *identity.Messages.Claim) assert.Equal(t, *verifyMsg.Header.ID, *identity.Messages.Verification) return true }), database.UpsertOptimizationNew).Return(nil) - mdi.On("UpsertVerifier", ctx, mock.MatchedBy(func(verifier *core.Verifier) bool { + dh.mdi.On("UpsertVerifier", ctx, mock.MatchedBy(func(verifier *core.Verifier) bool { assert.Equal(t, core.VerifierTypeEthAddress, verifier.Type) assert.Equal(t, "0x12345", verifier.Value) assert.Equal(t, *custom1.ID, *verifier.Identity) return true }), database.UpsertOptimizationNew).Return(nil) - mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + dh.mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { return event.Type == core.EventTypeIdentityConfirmed })).Return(nil) - mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{claimData}, true, nil) + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{claimData}, true, nil) dh.multiparty = true @@ -76,10 +69,6 @@ func TestHandleDefinitionIdentityVerificationWithExistingClaimOk(t *testing.T) { err = bs.RunFinalize(ctx) assert.NoError(t, err) - - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) } func TestHandleDefinitionIdentityVerificationIncompleteClaimData(t *testing.T) { @@ -89,22 +78,14 @@ func TestHandleDefinitionIdentityVerificationIncompleteClaimData(t *testing.T) { _, org1, claimMsg, _, verifyMsg, verifyData := testCustomClaimAndVerification(t) claimMsg.State = core.MessageStateConfirmed - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(claimMsg, nil) - - mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{}, false, nil) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(claimMsg, nil) + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(core.DataArray{}, false, nil) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, verifyMsg, core.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionConfirm}, action) assert.NoError(t, err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) bs.assertNoFinalizers() } @@ -115,22 +96,14 @@ func TestHandleDefinitionIdentityVerificationClaimDataFail(t *testing.T) { _, org1, claimMsg, _, verifyMsg, verifyData := testCustomClaimAndVerification(t) claimMsg.State = core.MessageStateConfirmed - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(claimMsg, nil) - - mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(claimMsg, nil) + dh.mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, verifyMsg, 
core.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) bs.assertNoFinalizers() } @@ -142,18 +115,13 @@ func TestHandleDefinitionIdentityVerificationClaimHashMismatchl(t *testing.T) { claimMsg.State = core.MessageStateConfirmed claimMsg.Hash = fftypes.NewRandB32() - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(claimMsg, nil) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(claimMsg, nil) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, verifyMsg, core.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } @@ -163,18 +131,13 @@ func TestHandleDefinitionIdentityVerificationBeforeClaim(t *testing.T) { _, org1, claimMsg, _, verifyMsg, verifyData := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(nil, nil) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(nil, nil) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, verifyMsg, core.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionConfirm}, action) assert.NoError(t, err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } @@ -184,18 +147,13 @@ func TestHandleDefinitionIdentityVerificationClaimLookupFail(t *testing.T) { _, org1, claimMsg, _, verifyMsg, verifyData := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(nil, fmt.Errorf("pop")) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mdi.On("GetMessageByID", ctx, "ns1", claimMsg.Header.ID).Return(nil, fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, verifyMsg, core.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } @@ -206,14 +164,12 @@ func TestHandleDefinitionIdentityVerificationWrongSigner(t *testing.T) { _, org1, _, _, verifyMsg, verifyData := testCustomClaimAndVerification(t) verifyMsg.Header.Author = "wrong" - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, verifyMsg, core.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mim.AssertExpectations(t) 
bs.assertNoFinalizers() } @@ -223,14 +179,12 @@ func TestHandleDefinitionIdentityVerificationCheckParentNotFound(t *testing.T) { _, org1, _, _, verifyMsg, verifyData := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, nil) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, nil) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, verifyMsg, core.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } @@ -240,14 +194,12 @@ func TestHandleDefinitionIdentityVerificationCheckParentFail(t *testing.T) { _, org1, _, _, verifyMsg, verifyData := testCustomClaimAndVerification(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, fmt.Errorf("pop")) + dh.mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(ctx, &bs.BatchState, verifyMsg, core.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } diff --git a/internal/definitions/handler_network_node_test.go b/internal/definitions/handler_network_node_test.go index 384a2e77b3..0877ee7a4d 100644 --- a/internal/definitions/handler_network_node_test.go +++ b/internal/definitions/handler_network_node_test.go @@ -23,9 +23,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/databasemocks" - "github.com/hyperledger/firefly/mocks/dataexchangemocks" - "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" "github.com/stretchr/testify/assert" @@ -102,34 +99,29 @@ func TestHandleDeprecatedNodeDefinitionOK(t *testing.T) { node, msg, data := testDeprecatedRootNode(t) parent, _, _ := testDeprecatedRootOrg(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("FindIdentityForVerifier", ctx, []core.IdentityType{core.IdentityTypeOrg}, &core.VerifierRef{ + dh.mim.On("FindIdentityForVerifier", ctx, []core.IdentityType{core.IdentityTypeOrg}, &core.VerifierRef{ Type: core.VerifierTypeEthAddress, Value: node.Owner, }).Return(parent.Migrated().Identity, nil) - mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(parent.Migrated().Identity, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, core.IdentityTypeNode, "ns1", node.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", node.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeFFDXPeerID, "ns1", "member_0").Return(nil, nil) - mdi.On("UpsertIdentity", ctx, mock.MatchedBy(func(identity *core.Identity) bool { + dh.mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(parent.Migrated().Identity, false, nil) + dh.mdi.On("GetIdentityByName", ctx, core.IdentityTypeNode, "ns1", node.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", node.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeFFDXPeerID, "ns1", "member_0").Return(nil, nil) + dh.mdi.On("UpsertIdentity", ctx, mock.MatchedBy(func(identity *core.Identity) bool { assert.Equal(t, *msg.Header.ID, *identity.Messages.Claim) return true }), 
database.UpsertOptimizationNew).Return(nil) - mdi.On("UpsertVerifier", ctx, mock.MatchedBy(func(verifier *core.Verifier) bool { + dh.mdi.On("UpsertVerifier", ctx, mock.MatchedBy(func(verifier *core.Verifier) bool { assert.Equal(t, core.VerifierTypeFFDXPeerID, verifier.Type) assert.Equal(t, "member_0", verifier.Value) assert.Equal(t, *node.ID, *verifier.Identity) return true }), database.UpsertOptimizationNew).Return(nil) - mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + dh.mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { return event.Type == core.EventTypeIdentityConfirmed })).Return(nil) - - mdx := dh.exchange.(*dataexchangemocks.Plugin) - mdx.On("GetPeerID", node.DX.Endpoint).Return("member_0") - mdx.On("AddNode", ctx, "ns1", node.Name, node.DX.Endpoint).Return(nil) + dh.mdx.On("GetPeerID", node.DX.Endpoint).Return("member_0") + dh.mdx.On("AddNode", ctx, "ns1", node.Name, node.DX.Endpoint).Return(nil) dh.multiparty = true @@ -141,11 +133,6 @@ func TestHandleDeprecatedNodeDefinitionOK(t *testing.T) { assert.NoError(t, err) err = bs.RunFinalize(ctx) assert.NoError(t, err) - - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdx.AssertExpectations(t) - } func TestHandleDeprecatedNodeDefinitionBadData(t *testing.T) { @@ -165,8 +152,7 @@ func TestHandleDeprecatedNodeDefinitionFailOrgLookup(t *testing.T) { node, msg, data := testDeprecatedRootNode(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("FindIdentityForVerifier", ctx, []core.IdentityType{core.IdentityTypeOrg}, &core.VerifierRef{ + dh.mim.On("FindIdentityForVerifier", ctx, []core.IdentityType{core.IdentityTypeOrg}, &core.VerifierRef{ Type: core.VerifierTypeEthAddress, Value: node.Owner, }).Return(nil, fmt.Errorf("pop")) @@ -175,7 +161,6 @@ func TestHandleDeprecatedNodeDefinitionFailOrgLookup(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.Regexp(t, "pop", err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } @@ -186,8 +171,7 @@ func TestHandleDeprecatedNodeDefinitionOrgNotFound(t *testing.T) { node, msg, data := testDeprecatedRootNode(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("FindIdentityForVerifier", ctx, []core.IdentityType{core.IdentityTypeOrg}, &core.VerifierRef{ + dh.mim.On("FindIdentityForVerifier", ctx, []core.IdentityType{core.IdentityTypeOrg}, &core.VerifierRef{ Type: core.VerifierTypeEthAddress, Value: node.Owner, }).Return(nil, nil) @@ -196,7 +180,6 @@ func TestHandleDeprecatedNodeDefinitionOrgNotFound(t *testing.T) { assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) - mim.AssertExpectations(t) bs.assertNoFinalizers() } diff --git a/internal/definitions/handler_network_org_test.go b/internal/definitions/handler_network_org_test.go index 82b5977931..86a63dbca1 100644 --- a/internal/definitions/handler_network_org_test.go +++ b/internal/definitions/handler_network_org_test.go @@ -22,8 +22,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/databasemocks" - "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" "github.com/stretchr/testify/assert" @@ -89,24 +87,21 @@ func TestHandleDeprecatedOrgDefinitionOK(t *testing.T) { org, msg, data := testDeprecatedRootOrg(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", ctx, 
mock.Anything).Return(nil, false, nil) - - mdi := dh.database.(*databasemocks.Plugin) - mdi.On("GetIdentityByName", ctx, core.IdentityTypeOrg, "ns1", org.Name).Return(nil, nil) - mdi.On("GetIdentityByID", ctx, "ns1", org.ID).Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", msg.Header.Key).Return(nil, nil) - mdi.On("UpsertIdentity", ctx, mock.MatchedBy(func(identity *core.Identity) bool { + dh.mim.On("VerifyIdentityChain", ctx, mock.Anything).Return(nil, false, nil) + dh.mdi.On("GetIdentityByName", ctx, core.IdentityTypeOrg, "ns1", org.Name).Return(nil, nil) + dh.mdi.On("GetIdentityByID", ctx, "ns1", org.ID).Return(nil, nil) + dh.mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", msg.Header.Key).Return(nil, nil) + dh.mdi.On("UpsertIdentity", ctx, mock.MatchedBy(func(identity *core.Identity) bool { assert.Equal(t, *msg.Header.ID, *identity.Messages.Claim) return true }), database.UpsertOptimizationNew).Return(nil) - mdi.On("UpsertVerifier", ctx, mock.MatchedBy(func(verifier *core.Verifier) bool { + dh.mdi.On("UpsertVerifier", ctx, mock.MatchedBy(func(verifier *core.Verifier) bool { assert.Equal(t, core.VerifierTypeEthAddress, verifier.Type) assert.Equal(t, msg.Header.Key, verifier.Value) assert.Equal(t, *org.ID, *verifier.Identity) return true }), database.UpsertOptimizationNew).Return(nil) - mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + dh.mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { return event.Type == core.EventTypeIdentityConfirmed })).Return(nil) @@ -118,9 +113,6 @@ func TestHandleDeprecatedOrgDefinitionOK(t *testing.T) { err = bs.RunFinalize(ctx) assert.NoError(t, err) - - mim.AssertExpectations(t) - mdi.AssertExpectations(t) } func TestHandleDeprecatedOrgDefinitionBadData(t *testing.T) { diff --git a/internal/definitions/handler_test.go b/internal/definitions/handler_test.go index 0a4164cc7e..44559f0399 100644 --- a/internal/definitions/handler_test.go +++ b/internal/definitions/handler_test.go @@ -33,7 +33,29 @@ import ( "github.com/stretchr/testify/assert" ) -func newTestDefinitionHandler(t *testing.T) (*definitionHandler, *testDefinitionBatchState) { +type testDefinitionHandler struct { + definitionHandler + + mdi *databasemocks.Plugin + mbi *blockchainmocks.Plugin + mdx *dataexchangemocks.Plugin + mim *identitymanagermocks.Manager + mdm *datamocks.Manager + mam *assetmocks.Manager + mcm *contractmocks.Manager +} + +func (tdh *testDefinitionHandler) cleanup(t *testing.T) { + tdh.mdi.AssertExpectations(t) + tdh.mbi.AssertExpectations(t) + tdh.mdx.AssertExpectations(t) + tdh.mim.AssertExpectations(t) + tdh.mdm.AssertExpectations(t) + tdh.mam.AssertExpectations(t) + tdh.mcm.AssertExpectations(t) +} + +func newTestDefinitionHandler(t *testing.T) (*testDefinitionHandler, *testDefinitionBatchState) { mdi := &databasemocks.Plugin{} mbi := &blockchainmocks.Plugin{} mdx := &dataexchangemocks.Plugin{} @@ -46,7 +68,16 @@ func newTestDefinitionHandler(t *testing.T) (*definitionHandler, *testDefinition mbi.On("VerifierType").Return(core.VerifierTypeEthAddress).Maybe() ns := &core.Namespace{Name: "ns1", NetworkName: "ns1"} dh, _ := newDefinitionHandler(context.Background(), ns, false, mdi, mbi, mdx, mdm, mim, mam, mcm, tokenNames) - return dh, newTestDefinitionBatchState(t) + return &testDefinitionHandler{ + definitionHandler: *dh, + mdi: mdi, + mbi: mbi, + mdx: mdx, + mim: mim, + mdm: mdm, + mam: mam, + mcm: mcm, + }, newTestDefinitionBatchState(t) } type 
testDefinitionBatchState struct { @@ -77,6 +108,8 @@ func TestInitFail(t *testing.T) { func TestHandleDefinitionBroadcastUnknown(t *testing.T) { dh, bs := newTestDefinitionHandler(t) + defer dh.cleanup(t) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, &core.Message{ Header: core.MessageHeader{ Tag: "unknown", @@ -89,6 +122,8 @@ func TestHandleDefinitionBroadcastUnknown(t *testing.T) { func TestGetSystemBroadcastPayloadMissingData(t *testing.T) { dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + valid := dh.getSystemBroadcastPayload(context.Background(), &core.Message{ Header: core.MessageHeader{ Tag: "unknown", @@ -99,6 +134,8 @@ func TestGetSystemBroadcastPayloadMissingData(t *testing.T) { func TestGetSystemBroadcastPayloadBadJSON(t *testing.T) { dh, _ := newTestDefinitionHandler(t) + defer dh.cleanup(t) + valid := dh.getSystemBroadcastPayload(context.Background(), &core.Message{ Header: core.MessageHeader{ Tag: "unknown", diff --git a/internal/definitions/handler_tokenpool.go b/internal/definitions/handler_tokenpool.go index db109812a1..048f59ceff 100644 --- a/internal/definitions/handler_tokenpool.go +++ b/internal/definitions/handler_tokenpool.go @@ -18,6 +18,7 @@ package definitions import ( "context" + "fmt" "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly-common/pkg/log" @@ -27,12 +28,12 @@ import ( ) func (dh *definitionHandler) handleTokenPoolBroadcast(ctx context.Context, state *core.BatchState, msg *core.Message, data core.DataArray) (HandlerResult, error) { - var announce core.TokenPoolAnnouncement - if valid := dh.getSystemBroadcastPayload(ctx, msg, data, &announce); !valid { + var definition core.TokenPoolDefinition + if valid := dh.getSystemBroadcastPayload(ctx, msg, data, &definition); !valid { return HandlerResult{Action: core.ActionReject}, i18n.NewError(ctx, coremsgs.MsgDefRejectedBadPayload, "token pool", msg.Header.ID) } - pool := announce.Pool + pool := definition.Pool // Map remote connector name -> local name if localName, ok := dh.tokenNames[pool.Connector]; ok { pool.Connector = localName @@ -41,34 +42,58 @@ func (dh *definitionHandler) handleTokenPoolBroadcast(ctx context.Context, state return HandlerResult{Action: core.ActionReject}, i18n.NewError(ctx, coremsgs.MsgInvalidConnectorName, pool.Connector, "token") } + org, err := dh.identity.GetRootOrgDID(ctx) + if err != nil { + return HandlerResult{Action: core.ActionRetry}, err + } + isAuthor := org == msg.Header.Author + pool.Message = msg.Header.ID - return dh.handleTokenPoolDefinition(ctx, state, pool) + pool.Name = pool.NetworkName + pool.Published = true + return dh.handleTokenPoolDefinition(ctx, state, pool, isAuthor) } -func (dh *definitionHandler) handleTokenPoolDefinition(ctx context.Context, state *core.BatchState, pool *core.TokenPool) (HandlerResult, error) { +func (dh *definitionHandler) handleTokenPoolDefinition(ctx context.Context, state *core.BatchState, pool *core.TokenPool, isAuthor bool) (HandlerResult, error) { // Set an event correlator, so that if we reject then the sync-async bridge action can know // from the event (without downloading and parsing the msg) correlator := pool.ID + // Attempt to create the pool in pending state pool.Namespace = dh.namespace.Name - if err := pool.Validate(ctx); err != nil { - return HandlerResult{Action: core.ActionReject, CustomCorrelator: correlator}, i18n.WrapError(ctx, err, coremsgs.MsgDefRejectedValidateFail, "token pool", pool.ID) - } + pool.Active = false + for i := 1; ; 
i++ { + if err := pool.Validate(ctx); err != nil { + return HandlerResult{Action: core.ActionReject, CustomCorrelator: correlator}, i18n.WrapError(ctx, err, coremsgs.MsgDefRejectedValidateFail, "token pool", pool.ID) + } - // Check if pool has already been confirmed on chain (and confirm the message if so) - if existingPool, err := dh.database.GetTokenPoolByID(ctx, dh.namespace.Name, pool.ID); err != nil { - return HandlerResult{Action: core.ActionRetry}, err - } else if existingPool != nil && existingPool.State == core.TokenPoolStateConfirmed { - return HandlerResult{Action: core.ActionConfirm, CustomCorrelator: correlator}, nil - } + // Check if the pool conflicts with an existing pool + existing, err := dh.database.InsertOrGetTokenPool(ctx, pool) + if err != nil { + return HandlerResult{Action: core.ActionRetry}, err + } - // Create the pool in pending state - pool.State = core.TokenPoolStatePending - if err := dh.database.UpsertTokenPool(ctx, pool); err != nil { - if err == database.IDMismatch { - return HandlerResult{Action: core.ActionReject, CustomCorrelator: correlator}, i18n.NewError(ctx, coremsgs.MsgDefRejectedIDMismatch, "token pool", pool.ID) + if existing == nil { + // No conflict - new pool was inserted successfully + break } - return HandlerResult{Action: core.ActionRetry}, err + + if pool.Published { + if existing.ID.Equals(pool.ID) { + // ID conflict - check if this matches (or should overwrite) the existing record + action, err := dh.reconcilePublishedPool(ctx, existing, pool, isAuthor) + return HandlerResult{Action: action, CustomCorrelator: correlator}, err + } + + if existing.Name == pool.Name { + // Local name conflict - generate a unique name and try again + pool.Name = fmt.Sprintf("%s-%d", pool.NetworkName, i) + continue + } + } + + // Any other conflict - reject + return HandlerResult{Action: core.ActionReject, CustomCorrelator: correlator}, i18n.NewError(ctx, coremsgs.MsgDefRejectedConflict, "token pool", pool.ID, existing.ID) } // Message will remain unconfirmed, but plugin will be notified to activate the pool @@ -82,3 +107,26 @@ func (dh *definitionHandler) handleTokenPoolDefinition(ctx context.Context, stat }) return HandlerResult{Action: core.ActionWait, CustomCorrelator: correlator}, nil } + +func (dh *definitionHandler) reconcilePublishedPool(ctx context.Context, existing, pool *core.TokenPool, isAuthor bool) (core.MessageAction, error) { + if existing.Message.Equals(pool.Message) { + if existing.Active { + // Pool was previously activated - this must be a rewind to confirm the message + return core.ActionConfirm, nil + } else { + // Pool is still awaiting activation + return core.ActionWait, nil + } + } + + if existing.Message == nil && isAuthor { + // Pool was previously unpublished - if it was now published by this node, upsert the new version + pool.Name = existing.Name + if err := dh.database.UpsertTokenPool(ctx, pool, database.UpsertOptimizationExisting); err != nil { + return core.ActionRetry, err + } + return core.ActionConfirm, nil + } + + return core.ActionReject, i18n.NewError(ctx, coremsgs.MsgDefRejectedConflict, "token pool", pool.ID, existing.ID) +} diff --git a/internal/definitions/handler_tokenpool_test.go b/internal/definitions/handler_tokenpool_test.go index 44d551de29..37483c681a 100644 --- a/internal/definitions/handler_tokenpool_test.go +++ b/internal/definitions/handler_tokenpool_test.go @@ -23,41 +23,44 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/assetmocks" - 
"github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) -func newPoolAnnouncement() *core.TokenPoolAnnouncement { +func newPoolDefinition() *core.TokenPoolDefinition { pool := &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: "ns1", - Name: "name1", - Type: core.TokenTypeFungible, - Locator: "12345", - Symbol: "COIN", + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "name1", + NetworkName: "name1", + Type: core.TokenTypeFungible, + Locator: "12345", + Symbol: "COIN", TX: core.TransactionRef{ Type: core.TransactionTypeTokenPool, ID: fftypes.NewUUID(), }, Connector: "remote1", + Published: true, } - return &core.TokenPoolAnnouncement{ + return &core.TokenPoolDefinition{ Pool: pool, } } -func buildPoolDefinitionMessage(announce *core.TokenPoolAnnouncement) (*core.Message, core.DataArray, error) { +func buildPoolDefinitionMessage(definition *core.TokenPoolDefinition) (*core.Message, core.DataArray, error) { msg := &core.Message{ Header: core.MessageHeader{ ID: fftypes.NewUUID(), Tag: core.SystemTagDefinePool, + SignerRef: core.SignerRef{ + Author: "firefly:org1", + }, }, } - b, err := json.Marshal(announce) + b, err := json.Marshal(definition) if err != nil { return nil, nil, err } @@ -68,44 +71,39 @@ func buildPoolDefinitionMessage(announce *core.TokenPoolAnnouncement) (*core.Mes } func TestHandleDefinitionBroadcastTokenPoolActivateOK(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) + dh, bs := newTestDefinitionHandler(t) - announce := newPoolAnnouncement() - pool := announce.Pool - msg, data, err := buildPoolDefinitionMessage(announce) + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) - mdi := sh.database.(*databasemocks.Plugin) - mam := sh.assets.(*assetmocks.Manager) - mdi.On("GetTokenPoolByID", context.Background(), "ns1", pool.ID).Return(nil, nil) - mdi.On("UpsertTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { return *p.ID == *pool.ID && p.Message == msg.Header.ID && p.Connector == "connector1" - })).Return(nil) - mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*core.TokenPool")).Return(nil) + })).Return(nil, nil) + dh.mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*core.TokenPool")).Return(nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionWait, CustomCorrelator: pool.ID}, action) assert.NoError(t, err) err = bs.RunPreFinalize(context.Background()) assert.NoError(t, err) - - mdi.AssertExpectations(t) } func TestHandleDefinitionBroadcastTokenPoolBadConnector(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) + dh, bs := newTestDefinitionHandler(t) - announce := newPoolAnnouncement() - pool := announce.Pool - pool.Name = "//bad" - msg, data, err := buildPoolDefinitionMessage(announce) + definition := newPoolDefinition() + pool := definition.Pool + pool.NetworkName = "//bad" + msg, data, err := buildPoolDefinitionMessage(definition) 
assert.NoError(t, err) - mam := sh.assets.(*assetmocks.Manager) - mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*core.TokenPool")).Return(nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject, CustomCorrelator: pool.ID}, action) assert.Regexp(t, "FF10403", err) @@ -113,158 +111,304 @@ func TestHandleDefinitionBroadcastTokenPoolBadConnector(t *testing.T) { assert.NoError(t, err) } -func TestHandleDefinitionBroadcastTokenPoolGetPoolFail(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) +func TestHandleDefinitionBroadcastTokenPoolNameExists(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) - announce := newPoolAnnouncement() - pool := announce.Pool - msg, data, err := buildPoolDefinitionMessage(announce) + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) - mdi := sh.database.(*databasemocks.Plugin) - mdi.On("GetTokenPoolByID", context.Background(), "ns1", pool.ID).Return(nil, fmt.Errorf("pop")) + existing := &core.TokenPool{ + Name: "name1", + } - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) - assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) - assert.EqualError(t, err, "pop") + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + return *p.ID == *pool.ID && p.Message == msg.Header.ID && p.Name == "name1" + })).Return(existing, nil) + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + return *p.ID == *pool.ID && p.Message == msg.Header.ID && p.Name == "name1-1" + })).Return(nil, nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + assert.Equal(t, HandlerResult{Action: core.ActionWait, CustomCorrelator: pool.ID}, action) + assert.NoError(t, err) +} + +func TestHandleDefinitionLocalTokenPoolNameExists(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) + + definition := newPoolDefinition() + pool := definition.Pool + pool.Published = false + + existing := &core.TokenPool{ + Active: true, + Name: "name1", + } + + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + return *p.ID == *pool.ID && p.Name == "name1" + })).Return(existing, nil) + + action, err := dh.handleTokenPoolDefinition(context.Background(), &bs.BatchState, pool, true) + assert.Equal(t, HandlerResult{Action: core.ActionReject, CustomCorrelator: pool.ID}, action) + assert.Error(t, err) + + bs.assertNoFinalizers() +} + +func TestHandleDefinitionBroadcastTokenPoolNetworkNameExists(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) + + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) + assert.NoError(t, err) + + existing := &core.TokenPool{ + NetworkName: "name1", + } + + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + return *p.ID == *pool.ID && p.Message == msg.Header.ID + 
})).Return(existing, nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + assert.Equal(t, HandlerResult{Action: core.ActionReject, CustomCorrelator: pool.ID}, action) + assert.Error(t, err) - mdi.AssertExpectations(t) bs.assertNoFinalizers() } -func TestHandleDefinitionBroadcastTokenPoolExisting(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) +func TestHandleDefinitionBroadcastTokenPoolExistingConfirmed(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) + + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) + assert.NoError(t, err) + + existing := &core.TokenPool{ + ID: pool.ID, + Active: true, + Message: msg.Header.ID, + } + + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + return *p.ID == *pool.ID && p.Message == msg.Header.ID + })).Return(existing, nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + assert.Equal(t, HandlerResult{Action: core.ActionConfirm, CustomCorrelator: pool.ID}, action) + assert.NoError(t, err) +} + +func TestHandleDefinitionBroadcastTokenPoolExistingWaiting(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) - announce := newPoolAnnouncement() - pool := announce.Pool - msg, data, err := buildPoolDefinitionMessage(announce) + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) - mdi := sh.database.(*databasemocks.Plugin) - mam := sh.assets.(*assetmocks.Manager) - mdi.On("GetTokenPoolByID", context.Background(), "ns1", pool.ID).Return(&core.TokenPool{}, nil) - mdi.On("UpsertTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + existing := &core.TokenPool{ + ID: pool.ID, + Active: false, + Message: msg.Header.ID, + } + + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { return *p.ID == *pool.ID && p.Message == msg.Header.ID - })).Return(nil) - mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*core.TokenPool")).Return(nil) + })).Return(existing, nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionWait, CustomCorrelator: pool.ID}, action) assert.NoError(t, err) +} - err = bs.RunPreFinalize(context.Background()) +func TestHandleDefinitionBroadcastTokenPoolExistingPublish(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) + + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) + existing := &core.TokenPool{ + ID: pool.ID, + Active: true, + Name: "existing-pool", + } + newPool := *pool + newPool.Name = existing.Name + newPool.Connector = "connector1" + newPool.Published = true + newPool.Message = msg.Header.ID + newPool.Active = false + + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + return *p.ID == 
*pool.ID && p.Message == msg.Header.ID + })).Return(existing, nil) + dh.mdi.On("UpsertTokenPool", context.Background(), &newPool, database.UpsertOptimizationExisting).Return(nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + assert.Equal(t, HandlerResult{Action: core.ActionConfirm, CustomCorrelator: pool.ID}, action) + assert.NoError(t, err) } -func TestHandleDefinitionBroadcastTokenPoolExistingConfirmed(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) +func TestHandleDefinitionBroadcastTokenPoolExistingPublishUpsertFail(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) - announce := newPoolAnnouncement() - pool := announce.Pool - msg, data, err := buildPoolDefinitionMessage(announce) + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) + existing := &core.TokenPool{ - State: core.TokenPoolStateConfirmed, + ID: pool.ID, + Active: true, + Name: "existing-pool", } + newPool := *pool + newPool.Name = existing.Name + newPool.Connector = "connector1" + newPool.Published = true + newPool.Message = msg.Header.ID + newPool.Active = false + + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + return *p.ID == *pool.ID && p.Message == msg.Header.ID + })).Return(existing, nil) + dh.mdi.On("UpsertTokenPool", context.Background(), &newPool, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) + + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + assert.Equal(t, HandlerResult{Action: core.ActionRetry, CustomCorrelator: pool.ID}, action) + assert.EqualError(t, err, "pop") +} - mdi := sh.database.(*databasemocks.Plugin) - mdi.On("GetTokenPoolByID", context.Background(), "ns1", pool.ID).Return(existing, nil) +func TestHandleDefinitionBroadcastTokenPoolExistingPublishOrgFail(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) - assert.Equal(t, HandlerResult{Action: core.ActionConfirm, CustomCorrelator: pool.ID}, action) + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) - mdi.AssertExpectations(t) + existing := &core.TokenPool{ + ID: pool.ID, + Active: true, + Name: "existing-pool", + } + newPool := *pool + newPool.Name = existing.Name + newPool.Connector = "connector1" + newPool.Published = true + newPool.Message = msg.Header.ID + newPool.Active = false + + dh.mim.On("GetRootOrgDID", context.Background()).Return("", fmt.Errorf("pop")) + + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) + assert.EqualError(t, err, "pop") } -func TestHandleDefinitionBroadcastTokenPoolIDMismatch(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) +func TestHandleDefinitionBroadcastTokenPoolExistingPublishOrgMismatch(t *testing.T) { + dh, bs := newTestDefinitionHandler(t) - announce := newPoolAnnouncement() - pool := announce.Pool - msg, data, err := buildPoolDefinitionMessage(announce) + definition := newPoolDefinition() + pool := 
definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) - mdi := sh.database.(*databasemocks.Plugin) - mdi.On("GetTokenPoolByID", context.Background(), "ns1", pool.ID).Return(nil, nil) - mdi.On("UpsertTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + existing := &core.TokenPool{ + ID: pool.ID, + Active: true, + Name: "existing-pool", + } + newPool := *pool + newPool.Name = existing.Name + newPool.Connector = "connector1" + newPool.Published = true + newPool.Message = msg.Header.ID + newPool.Active = false + + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { return *p.ID == *pool.ID && p.Message == msg.Header.ID - })).Return(database.IDMismatch) + })).Return(existing, nil) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org2", nil) - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject, CustomCorrelator: pool.ID}, action) - assert.Error(t, err) - - mdi.AssertExpectations(t) - bs.assertNoFinalizers() + assert.Regexp(t, "FF10407", err) } func TestHandleDefinitionBroadcastTokenPoolFailUpsert(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) + dh, bs := newTestDefinitionHandler(t) - announce := newPoolAnnouncement() - pool := announce.Pool - msg, data, err := buildPoolDefinitionMessage(announce) + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) - mdi := sh.database.(*databasemocks.Plugin) - mdi.On("GetTokenPoolByID", context.Background(), "ns1", pool.ID).Return(nil, nil) - mdi.On("UpsertTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { return *p.ID == *pool.ID && p.Message == msg.Header.ID - })).Return(fmt.Errorf("pop")) + })).Return(nil, fmt.Errorf("pop")) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionRetry}, action) assert.EqualError(t, err, "pop") - mdi.AssertExpectations(t) bs.assertNoFinalizers() } func TestHandleDefinitionBroadcastTokenPoolActivateFail(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) + dh, bs := newTestDefinitionHandler(t) - announce := newPoolAnnouncement() - pool := announce.Pool - msg, data, err := buildPoolDefinitionMessage(announce) + definition := newPoolDefinition() + pool := definition.Pool + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) - mdi := sh.database.(*databasemocks.Plugin) - mam := sh.assets.(*assetmocks.Manager) - mdi.On("GetTokenPoolByID", context.Background(), "ns1", pool.ID).Return(nil, nil) - mdi.On("UpsertTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { + dh.mdi.On("InsertOrGetTokenPool", context.Background(), mock.MatchedBy(func(p *core.TokenPool) bool { return *p.ID == *pool.ID && p.Message == msg.Header.ID - })).Return(nil) - mam.On("ActivateTokenPool", context.Background(), 
mock.AnythingOfType("*core.TokenPool")).Return(fmt.Errorf("pop")) + })).Return(nil, nil) + dh.mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*core.TokenPool")).Return(fmt.Errorf("pop")) + dh.mim.On("GetRootOrgDID", context.Background()).Return("firefly:org1", nil) - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionWait, CustomCorrelator: pool.ID}, action) assert.NoError(t, err) err = bs.RunPreFinalize(context.Background()) assert.EqualError(t, err, "pop") - - mdi.AssertExpectations(t) } func TestHandleDefinitionBroadcastTokenPoolValidateFail(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) + dh, bs := newTestDefinitionHandler(t) - announce := &core.TokenPoolAnnouncement{ + definition := &core.TokenPoolDefinition{ Pool: &core.TokenPool{}, } - msg, data, err := buildPoolDefinitionMessage(announce) + msg, data, err := buildPoolDefinitionMessage(definition) assert.NoError(t, err) - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, data, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) bs.assertNoFinalizers() } func TestHandleDefinitionBroadcastTokenPoolBadMessage(t *testing.T) { - sh, bs := newTestDefinitionHandler(t) + dh, bs := newTestDefinitionHandler(t) msg := &core.Message{ Header: core.MessageHeader{ @@ -273,7 +417,7 @@ func TestHandleDefinitionBroadcastTokenPoolBadMessage(t *testing.T) { }, } - action, err := sh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, nil, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(context.Background(), &bs.BatchState, msg, nil, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: core.ActionReject}, action) assert.Error(t, err) bs.assertNoFinalizers() diff --git a/internal/definitions/sender.go b/internal/definitions/sender.go index e41f8d8ef6..3d998c3a86 100644 --- a/internal/definitions/sender.go +++ b/internal/definitions/sender.go @@ -28,6 +28,7 @@ import ( "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/identity" + "github.com/hyperledger/firefly/internal/syncasync" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" @@ -37,12 +38,15 @@ import ( type Sender interface { core.Named - ClaimIdentity(ctx context.Context, def *core.IdentityClaim, signingIdentity *core.SignerRef, parentSigner *core.SignerRef, waitConfirm bool) error + ClaimIdentity(ctx context.Context, def *core.IdentityClaim, signingIdentity *core.SignerRef, parentSigner *core.SignerRef) error UpdateIdentity(ctx context.Context, identity *core.Identity, def *core.IdentityUpdate, signingIdentity *core.SignerRef, waitConfirm bool) error DefineDatatype(ctx context.Context, datatype *core.Datatype, waitConfirm bool) error - DefineTokenPool(ctx context.Context, pool *core.TokenPoolAnnouncement, waitConfirm bool) error + DefineTokenPool(ctx context.Context, pool *core.TokenPool, waitConfirm bool) error + PublishTokenPool(ctx context.Context, poolNameOrID, networkName string, waitConfirm bool) (*core.TokenPool, error) 
DefineFFI(ctx context.Context, ffi *fftypes.FFI, waitConfirm bool) error + PublishFFI(ctx context.Context, name, version, networkName string, waitConfirm bool) (*fftypes.FFI, error) DefineContractAPI(ctx context.Context, httpServerURL string, api *core.ContractAPI, waitConfirm bool) error + PublishContractAPI(ctx context.Context, httpServerURL, name, networkName string, waitConfirm bool) (api *core.ContractAPI, err error) } type definitionSender struct { @@ -54,8 +58,9 @@ type definitionSender struct { identity identity.Manager data data.Manager contracts contracts.Manager // optional + assets assets.Manager handler *definitionHandler - tokenBroadcastNames map[string]string // mapping of token connector name => remote name + tokenBroadcastNames map[string]string // mapping of token connector name => broadcast name } // Definitions that get processed immediately will create a temporary batch state and then finalize it inline @@ -84,6 +89,7 @@ func NewDefinitionSender(ctx context.Context, ns *core.Namespace, multiparty boo identity: im, data: dm, contracts: cm, + assets: am, tokenBroadcastNames: tokenBroadcastNames, } dh, err := newDefinitionHandler(ctx, ns, multiparty, di, bi, dx, dm, im, am, cm, reverseMap(tokenBroadcastNames)) @@ -100,36 +106,53 @@ func reverseMap(orderedMap map[string]string) map[string]string { return reverseMap } -func (bm *definitionSender) Name() string { +func (ds *definitionSender) Name() string { return "DefinitionSender" } -func (bm *definitionSender) sendDefinitionDefault(ctx context.Context, def core.Definition, tag string, waitConfirm bool) (msg *core.Message, err error) { - org, err := bm.identity.GetMultipartyRootOrg(ctx) - if err != nil { - return nil, err - } +type sendWrapper struct { + sender syncasync.Sender + message *core.Message + err error +} - return bm.sendDefinition(ctx, def, &core.SignerRef{ /* resolve to node default */ - Author: org.DID, - }, tag, waitConfirm) +func wrapSendError(err error) *sendWrapper { + return &sendWrapper{err: err} } -func (bm *definitionSender) sendDefinition(ctx context.Context, def core.Definition, signingIdentity *core.SignerRef, tag string, waitConfirm bool) (msg *core.Message, err error) { +func (w *sendWrapper) send(ctx context.Context, waitConfirm bool) (*core.Message, error) { + switch { + case w.err != nil: + return nil, w.err + case waitConfirm: + return w.message, w.sender.SendAndWait(ctx) + default: + return w.message, w.sender.Send(ctx) + } +} - err = bm.identity.ResolveInputSigningIdentity(ctx, signingIdentity) +func (ds *definitionSender) getSenderDefault(ctx context.Context, def core.Definition, tag string) *sendWrapper { + org, err := ds.identity.GetRootOrg(ctx) if err != nil { - return nil, err + return wrapSendError(err) } - - return bm.sendDefinitionCommon(ctx, def, signingIdentity, tag, waitConfirm) + return ds.getSender(ctx, def, &core.SignerRef{ /* resolve to node default */ + Author: org.DID, + }, tag) } -func (bm *definitionSender) sendDefinitionCommon(ctx context.Context, def core.Definition, signingIdentity *core.SignerRef, tag string, waitConfirm bool) (*core.Message, error) { +func (ds *definitionSender) getSender(ctx context.Context, def core.Definition, signingIdentity *core.SignerRef, tag string) *sendWrapper { + err := ds.identity.ResolveInputSigningIdentity(ctx, signingIdentity) + if err != nil { + return wrapSendError(err) + } + return ds.getSenderResolved(ctx, def, signingIdentity, tag) +} +func (ds *definitionSender) getSenderResolved(ctx context.Context, def core.Definition, 
signingIdentity *core.SignerRef, tag string) *sendWrapper { b, err := json.Marshal(&def) if err != nil { - return nil, i18n.WrapError(ctx, err, coremsgs.MsgSerializationFailed) + return wrapSendError(i18n.WrapError(ctx, err, coremsgs.MsgSerializationFailed)) } dataValue := fftypes.JSONAnyPtrBytes(b) message := &core.MessageInOut{ @@ -146,12 +169,8 @@ func (bm *definitionSender) sendDefinitionCommon(ctx context.Context, def core.D &core.DataRefOrValue{Value: dataValue}, }, } - - sender := bm.broadcast.NewBroadcast(message) - if waitConfirm { - err = sender.SendAndWait(ctx) - } else { - err = sender.Send(ctx) + return &sendWrapper{ + message: &message.Message, + sender: ds.broadcast.NewBroadcast(message), } - return &message.Message, err } diff --git a/internal/definitions/sender_contracts.go b/internal/definitions/sender_contracts.go index 813ab57fd2..7264a9f926 100644 --- a/internal/definitions/sender_contracts.go +++ b/internal/definitions/sender_contracts.go @@ -18,13 +18,17 @@ package definitions import ( "context" + "errors" "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly-common/pkg/i18n" + "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/pkg/core" ) -func (bm *definitionSender) DefineFFI(ctx context.Context, ffi *fftypes.FFI, waitConfirm bool) error { +func (ds *definitionSender) DefineFFI(ctx context.Context, ffi *fftypes.FFI, waitConfirm bool) error { ffi.ID = fftypes.NewUUID() + ffi.Namespace = ds.namespace for _, method := range ffi.Methods { method.ID = fftypes.NewUUID() } @@ -35,45 +39,175 @@ func (bm *definitionSender) DefineFFI(ctx context.Context, ffi *fftypes.FFI, wai errorDef.ID = fftypes.NewUUID() } - if bm.multiparty { - if err := bm.contracts.ResolveFFI(ctx, ffi); err != nil { - return err - } - - ffi.Namespace = "" - msg, err := bm.sendDefinitionDefault(ctx, ffi, core.SystemTagDefineFFI, waitConfirm) - if msg != nil { - ffi.Message = msg.Header.ID + if ffi.Published { + if !ds.multiparty { + return i18n.NewError(ctx, coremsgs.MsgActionNotSupported) } - ffi.Namespace = bm.namespace + _, err := ds.getFFISender(ctx, ffi).send(ctx, waitConfirm) return err } + ffi.NetworkName = "" + return fakeBatch(ctx, func(ctx context.Context, state *core.BatchState) (HandlerResult, error) { - return bm.handler.handleFFIDefinition(ctx, state, ffi, nil) + hr, err := ds.handler.handleFFIDefinition(ctx, state, ffi, nil, true) + if err != nil { + if innerErr := errors.Unwrap(err); innerErr != nil { + return hr, innerErr + } + } + return hr, err }) } -func (bm *definitionSender) DefineContractAPI(ctx context.Context, httpServerURL string, api *core.ContractAPI, waitConfirm bool) error { - if api.ID == nil { - api.ID = fftypes.NewUUID() +func (ds *definitionSender) getFFISender(ctx context.Context, ffi *fftypes.FFI) *sendWrapper { + if err := ds.contracts.ResolveFFI(ctx, ffi); err != nil { + return wrapSendError(err) + } + + if ffi.NetworkName == "" { + ffi.NetworkName = ffi.Name + } + + existing, err := ds.database.GetFFIByNetworkName(ctx, ds.namespace, ffi.NetworkName, ffi.Version) + if err != nil { + return wrapSendError(err) + } else if existing != nil { + return wrapSendError(i18n.NewError(ctx, coremsgs.MsgNetworkNameExists)) + } + + // Prepare the FFI definition to be serialized for broadcast + localName := ffi.Name + ffi.Name = "" + ffi.Namespace = "" + ffi.Published = true + + sender := ds.getSenderDefault(ctx, ffi, core.SystemTagDefineFFI) + if sender.message != nil { + ffi.Message = sender.message.Header.ID } - if 
bm.multiparty { - if err := bm.contracts.ResolveContractAPI(ctx, httpServerURL, api); err != nil { + ffi.Name = localName + ffi.Namespace = ds.namespace + return sender +} + +func (ds *definitionSender) PublishFFI(ctx context.Context, name, version, networkName string, waitConfirm bool) (ffi *fftypes.FFI, err error) { + if !ds.multiparty { + return nil, i18n.NewError(ctx, coremsgs.MsgActionNotSupported) + } + + var sender *sendWrapper + err = ds.database.RunAsGroup(ctx, func(ctx context.Context) error { + if ffi, err = ds.contracts.GetFFIWithChildren(ctx, name, version); err != nil { return err } + if ffi.Published { + return i18n.NewError(ctx, coremsgs.MsgAlreadyPublished) + } + ffi.NetworkName = networkName + sender = ds.getFFISender(ctx, ffi) + if sender.err != nil { + return sender.err + } + return sender.sender.Prepare(ctx) + }) + if err != nil { + return nil, err + } + + _, err = sender.send(ctx, waitConfirm) + return ffi, err +} - api.Namespace = "" - msg, err := bm.sendDefinitionDefault(ctx, api, core.SystemTagDefineContractAPI, waitConfirm) - if msg != nil { - api.Message = msg.Header.ID +func (ds *definitionSender) DefineContractAPI(ctx context.Context, httpServerURL string, api *core.ContractAPI, waitConfirm bool) error { + if api.ID == nil { + api.ID = fftypes.NewUUID() + } + api.Namespace = ds.namespace + + if api.Published { + if !ds.multiparty { + return i18n.NewError(ctx, coremsgs.MsgActionNotSupported) } - api.Namespace = bm.namespace + _, err := ds.getContractAPISender(ctx, httpServerURL, api).send(ctx, waitConfirm) return err } + api.NetworkName = "" + return fakeBatch(ctx, func(ctx context.Context, state *core.BatchState) (HandlerResult, error) { - return bm.handler.handleContractAPIDefinition(ctx, state, httpServerURL, api, nil) + return ds.handler.handleContractAPIDefinition(ctx, state, httpServerURL, api, nil, true) + }) +} + +func (ds *definitionSender) getContractAPISender(ctx context.Context, httpServerURL string, api *core.ContractAPI) *sendWrapper { + if err := ds.contracts.ResolveContractAPI(ctx, httpServerURL, api); err != nil { + return wrapSendError(err) + } + + if api.NetworkName == "" { + api.NetworkName = api.Name + } + + existing, err := ds.database.GetContractAPIByNetworkName(ctx, ds.namespace, api.NetworkName) + if err != nil { + return wrapSendError(err) + } else if existing != nil { + return wrapSendError(i18n.NewError(ctx, coremsgs.MsgNetworkNameExists)) + } + if api.Interface != nil && api.Interface.ID != nil { + iface, err := ds.database.GetFFIByID(ctx, ds.namespace, api.Interface.ID) + switch { + case err != nil: + return wrapSendError(err) + case iface == nil: + return wrapSendError(i18n.NewError(ctx, coremsgs.MsgContractInterfaceNotFound, api.Interface.ID)) + case !iface.Published: + return wrapSendError(i18n.NewError(ctx, coremsgs.MsgContractInterfaceNotPublished, api.Interface.ID)) + } + } + + // Prepare the API definition to be serialized for broadcast + localName := api.Name + api.Name = "" + api.Namespace = "" + api.Published = true + + sender := ds.getSenderDefault(ctx, api, core.SystemTagDefineContractAPI) + if sender.message != nil { + api.Message = sender.message.Header.ID + } + + api.Name = localName + api.Namespace = ds.namespace + return sender +} + +func (ds *definitionSender) PublishContractAPI(ctx context.Context, httpServerURL, name, networkName string, waitConfirm bool) (api *core.ContractAPI, err error) { + if !ds.multiparty { + return nil, i18n.NewError(ctx, coremsgs.MsgActionNotSupported) + } + + var sender *sendWrapper + 
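The getFFISender/PublishFFI code above (and PublishContractAPI just below) follows one pattern: each construction step returns a sendWrapper carrying either a ready sender or the first error, validation plus sender.Prepare run inside a database group so nothing is broadcast if any lookup fails, and the actual Send/SendAndWait happens outside the group. A condensed, stand-alone restatement with simplified stand-in types (not the FireFly interfaces):

package definitions_example

import "context"

// messageSender is a stand-in for the syncasync.Sender used by the patch.
type messageSender interface {
	Prepare(ctx context.Context) error
	Send(ctx context.Context) error
	SendAndWait(ctx context.Context) error
}

// dbGroup is a stand-in for the database plugin's transaction-like grouping.
type dbGroup interface {
	RunAsGroup(ctx context.Context, fn func(ctx context.Context) error) error
}

// sendWrapper mirrors the type introduced in sender.go: it defers the
// sync/async decision (and error short-circuiting) to a single send call.
type sendWrapper struct {
	sender messageSender
	err    error
}

func (w *sendWrapper) send(ctx context.Context, waitConfirm bool) error {
	switch {
	case w.err != nil:
		return w.err
	case waitConfirm:
		return w.sender.SendAndWait(ctx)
	default:
		return w.sender.Send(ctx)
	}
}

// publish sketches the two-phase publish flow: prepare inside the group, send
// outside it, so a failed lookup or Prepare never reaches the network.
func publish(ctx context.Context, db dbGroup, w *sendWrapper, waitConfirm bool) error {
	err := db.RunAsGroup(ctx, func(ctx context.Context) error {
		if w.err != nil {
			return w.err
		}
		return w.sender.Prepare(ctx)
	})
	if err != nil {
		return err
	}
	return w.send(ctx, waitConfirm)
}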
err = ds.database.RunAsGroup(ctx, func(ctx context.Context) error { + if api, err = ds.contracts.GetContractAPI(ctx, httpServerURL, name); err != nil { + return err + } + if api.Published { + return i18n.NewError(ctx, coremsgs.MsgAlreadyPublished) + } + api.NetworkName = networkName + sender = ds.getContractAPISender(ctx, httpServerURL, api) + if sender.err != nil { + return sender.err + } + return sender.sender.Prepare(ctx) }) + if err != nil { + return nil, err + } + + _, err = sender.send(ctx, waitConfirm) + return api, err } diff --git a/internal/definitions/sender_contracts_test.go b/internal/definitions/sender_contracts_test.go index 74ea7776c6..b0717cc460 100644 --- a/internal/definitions/sender_contracts_test.go +++ b/internal/definitions/sender_contracts_test.go @@ -22,9 +22,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/broadcastmocks" - "github.com/hyperledger/firefly/mocks/contractmocks" - "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" "github.com/hyperledger/firefly/pkg/core" "github.com/stretchr/testify/assert" @@ -32,165 +29,665 @@ import ( ) func TestDefineFFIResolveFail(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true ffi := &fftypes.FFI{ - Methods: []*fftypes.FFIMethod{{}}, - Events: []*fftypes.FFIEvent{{}}, - Errors: []*fftypes.FFIError{{}}, + Name: "ffi1", + Version: "1.0", + Methods: []*fftypes.FFIMethod{{}}, + Events: []*fftypes.FFIEvent{{}}, + Errors: []*fftypes.FFIError{{}}, + Published: true, } - mcm := ds.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", context.Background(), ffi).Return(fmt.Errorf("pop")) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(fmt.Errorf("pop")) err := ds.DefineFFI(context.Background(), ffi, false) assert.EqualError(t, err, "pop") - - mcm.AssertExpectations(t) } func TestDefineFFIFail(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true - ffi := &fftypes.FFI{} - - mcm := ds.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Published: true, + } - mim := ds.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", context.Background()).Return(nil, fmt.Errorf("pop")) + ds.mdi.On("GetFFIByNetworkName", context.Background(), "ns1", "ffi1", "1.0").Return(nil, nil) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + ds.mim.On("GetRootOrg", context.Background()).Return(nil, fmt.Errorf("pop")) err := ds.DefineFFI(context.Background(), ffi, false) assert.EqualError(t, err, "pop") +} + +func TestDefineFFIFailInnerError(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Published: false, + } - mcm.AssertExpectations(t) - mim.AssertExpectations(t) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + ds.mdi.On("InsertOrGetFFI", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error2: [%w]", fmt.Errorf("pop"))) + err := ds.DefineFFI(context.Background(), ffi, false) + assert.Regexp(t, "pop", err) } -func TestDefineFFIOk(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() +func TestDefineFFIExists(t *testing.T) { + ds 
:= newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Published: true, + } + + ds.mdi.On("GetFFIByNetworkName", context.Background(), "ns1", "ffi1", "1.0").Return(&fftypes.FFI{}, nil) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + + err := ds.DefineFFI(context.Background(), ffi, false) + assert.Regexp(t, "FF10448", err) +} + +func TestDefineFFIQueryFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true - ffi := &fftypes.FFI{} + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Published: true, + } + + ds.mdi.On("GetFFIByNetworkName", context.Background(), "ns1", "ffi1", "1.0").Return(nil, fmt.Errorf("pop")) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) - mcm := ds.contracts.(*contractmocks.Manager) - mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + err := ds.DefineFFI(context.Background(), ffi, false) + assert.EqualError(t, err, "pop") +} + +func TestDefineFFIOk(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true - mim := ds.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", context.Background()).Return(&core.Identity{ + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Published: true, + } + + ds.mdi.On("GetFFIByNetworkName", context.Background(), "ns1", "ffi1", "1.0").Return(nil, nil) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ IdentityBase: core.IdentityBase{ DID: "firefly:org1", }, }, nil) - mim.On("ResolveInputSigningIdentity", context.Background(), mock.Anything).Return(nil) + ds.mim.On("ResolveInputSigningIdentity", context.Background(), mock.Anything).Return(nil) - mbm := ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - mbm.On("NewBroadcast", mock.Anything).Return(mms) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) mms.On("Send", context.Background()).Return(nil) err := ds.DefineFFI(context.Background(), ffi, false) assert.NoError(t, err) - mcm.AssertExpectations(t) - mim.AssertExpectations(t) - mbm.AssertExpectations(t) mms.AssertExpectations(t) } +func TestDefineFFIConfirm(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Published: true, + } + + ds.mdi.On("GetFFIByNetworkName", context.Background(), "ns1", "ffi1", "1.0").Return(nil, nil) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ + IdentityBase: core.IdentityBase{ + DID: "firefly:org1", + }, + }, nil) + ds.mim.On("ResolveInputSigningIdentity", context.Background(), mock.Anything).Return(nil) + + mms := &syncasyncmocks.Sender{} + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("SendAndWait", context.Background()).Return(nil) + + err := ds.DefineFFI(context.Background(), ffi, true) + assert.NoError(t, err) + + mms.AssertExpectations(t) +} + +func TestDefineFFIPublishNonMultiparty(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = false + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Published: true, + } + + err := ds.DefineFFI(context.Background(), ffi, false) + assert.Regexp(t, "FF10414", err) +} + func TestDefineFFINonMultiparty(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - 
defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + } + + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + ds.mdi.On("InsertOrGetFFI", context.Background(), ffi).Return(nil, nil) + ds.mdi.On("InsertEvent", context.Background(), mock.Anything).Return(nil) + + err := ds.DefineFFI(context.Background(), ffi, false) + assert.NoError(t, err) +} + +func TestDefineFFINonMultipartyFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + } - ffi := &fftypes.FFI{} + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(fmt.Errorf("pop")) err := ds.DefineFFI(context.Background(), ffi, false) assert.Regexp(t, "FF10403", err) } func TestDefineContractAPIResolveFail(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true url := "http://firefly" - api := &core.ContractAPI{} + api := &core.ContractAPI{ + Name: "banana", + Published: true, + } - mcm := ds.contracts.(*contractmocks.Manager) - mcm.On("ResolveContractAPI", context.Background(), url, api).Return(fmt.Errorf("pop")) + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(fmt.Errorf("pop")) err := ds.DefineContractAPI(context.Background(), url, api, false) assert.EqualError(t, err, "pop") - - mcm.AssertExpectations(t) } func TestDefineContractAPIFail(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true url := "http://firefly" - api := &core.ContractAPI{} - - mcm := ds.contracts.(*contractmocks.Manager) - mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + api := &core.ContractAPI{ + Name: "banana", + Published: true, + } - mim := ds.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", context.Background()).Return(nil, fmt.Errorf("pop")) + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + ds.mim.On("GetRootOrg", context.Background()).Return(nil, fmt.Errorf("pop")) + ds.mdi.On("GetContractAPIByNetworkName", context.Background(), "ns1", "banana").Return(nil, nil) err := ds.DefineContractAPI(context.Background(), url, api, false) assert.EqualError(t, err, "pop") - - mcm.AssertExpectations(t) - mim.AssertExpectations(t) } func TestDefineContractAPIOk(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true url := "http://firefly" - api := &core.ContractAPI{} - - mcm := ds.contracts.(*contractmocks.Manager) - mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + api := &core.ContractAPI{ + Name: "banana", + Published: true, + } - mim := ds.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", context.Background()).Return(&core.Identity{ + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ IdentityBase: core.IdentityBase{ DID: "firefly:org1", }, }, nil) - mim.On("ResolveInputSigningIdentity", context.Background(), mock.Anything).Return(nil) + ds.mim.On("ResolveInputSigningIdentity", context.Background(), mock.Anything).Return(nil) + ds.mdi.On("GetContractAPIByNetworkName", context.Background(), "ns1", "banana").Return(nil, nil) - mbm := 
ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - mbm.On("NewBroadcast", mock.Anything).Return(mms) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) mms.On("Send", context.Background()).Return(nil) err := ds.DefineContractAPI(context.Background(), url, api, false) assert.NoError(t, err) - mcm.AssertExpectations(t) - mim.AssertExpectations(t) - mbm.AssertExpectations(t) mms.AssertExpectations(t) } func TestDefineContractAPINonMultiparty(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) url := "http://firefly" api := &core.ContractAPI{} + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + ds.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(nil, nil) + ds.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + err := ds.DefineContractAPI(context.Background(), url, api, false) - assert.Regexp(t, "FF10403", err) + assert.NoError(t, err) +} + +func TestDefineContractAPIPublishNonMultiparty(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = false + + url := "http://firefly" + api := &core.ContractAPI{ + Name: "banana", + Published: true, + } + + err := ds.DefineContractAPI(context.Background(), url, api, false) + assert.Regexp(t, "FF10414", err) +} + +func TestDefineContractAPINonMultipartyUpdate(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = false + testUUID := fftypes.NewUUID() + + url := "http://firefly" + api := &core.ContractAPI{ + ID: testUUID, + Name: "banana", + Published: false, + } + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + ds.mdi.On("InsertOrGetContractAPI", mock.Anything, mock.Anything).Return(api, nil) + ds.mdi.On("UpsertContractAPI", mock.Anything, mock.Anything, mock.Anything).Return(nil) + ds.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + + err := ds.DefineContractAPI(context.Background(), url, api, false) + assert.NoError(t, err) +} + +func TestPublishFFI(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + mms := &syncasyncmocks.Sender{} + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Namespace: "ns1", + Published: false, + } + + ds.mdi.On("GetFFIByNetworkName", context.Background(), "ns1", "ffi1-shared", "1.0").Return(nil, nil) + ds.mcm.On("GetFFIWithChildren", context.Background(), "ffi1", "1.0").Return(ffi, nil) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ + IdentityBase: core.IdentityBase{ + DID: "firefly:org1", + }, + }, nil) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Prepare", context.Background()).Return(nil) + mms.On("Send", context.Background()).Return(nil) + mockRunAsGroupPassthrough(ds.mdi) + + result, err := ds.PublishFFI(context.Background(), "ffi1", "1.0", "ffi1-shared", false) + assert.NoError(t, err) + assert.Equal(t, ffi, result) + assert.True(t, ffi.Published) + + mms.AssertExpectations(t) +} + +func TestPublishFFIAlreadyPublished(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Namespace: "ns1", + Published: true, + } + + ds.mcm.On("GetFFIWithChildren", context.Background(), "ffi1", 
"1.0").Return(ffi, nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishFFI(context.Background(), "ffi1", "1.0", "ffi1-shared", false) + assert.Regexp(t, "FF10450", err) +} + +func TestPublishFFIQueryFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + ds.mcm.On("GetFFIWithChildren", context.Background(), "ffi1", "1.0").Return(nil, fmt.Errorf("pop")) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishFFI(context.Background(), "ffi1", "1.0", "ffi1-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishFFIResolveFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Namespace: "ns1", + Published: false, + } + + ds.mcm.On("GetFFIWithChildren", context.Background(), "ffi1", "1.0").Return(ffi, nil) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(fmt.Errorf("pop")) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishFFI(context.Background(), "ffi1", "1.0", "ffi1-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishFFIPrepareFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + mms := &syncasyncmocks.Sender{} + + ffi := &fftypes.FFI{ + Name: "ffi1", + Version: "1.0", + Namespace: "ns1", + Published: false, + } + + ds.mdi.On("GetFFIByNetworkName", context.Background(), "ns1", "ffi1-shared", "1.0").Return(nil, nil) + ds.mcm.On("GetFFIWithChildren", context.Background(), "ffi1", "1.0").Return(ffi, nil) + ds.mcm.On("ResolveFFI", context.Background(), ffi).Return(nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ + IdentityBase: core.IdentityBase{ + DID: "firefly:org1", + }, + }, nil) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Prepare", context.Background()).Return(fmt.Errorf("pop")) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishFFI(context.Background(), "ffi1", "1.0", "ffi1-shared", false) + assert.EqualError(t, err, "pop") + + mms.AssertExpectations(t) +} + +func TestPublishFFINonMultiparty(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = false + + _, err := ds.PublishFFI(context.Background(), "ffi1", "1.0", "ffi1-shared", false) + assert.Regexp(t, "FF10414", err) +} + +func TestPublishContractAPI(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + mms := &syncasyncmocks.Sender{} + + url := "http://firefly" + api := &core.ContractAPI{ + Name: "ffi1", + Namespace: "ns1", + Published: false, + } + + ds.mdi.On("GetContractAPIByNetworkName", context.Background(), "ns1", "api-shared").Return(nil, nil) + ds.mcm.On("GetContractAPI", context.Background(), url, "api").Return(api, nil) + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ + IdentityBase: core.IdentityBase{ + DID: "firefly:org1", + }, + }, nil) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Prepare", context.Background()).Return(nil) + mms.On("Send", context.Background()).Return(nil) + mockRunAsGroupPassthrough(ds.mdi) + + result, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.NoError(t, 
err) + assert.Equal(t, api, result) + assert.True(t, api.Published) + + mms.AssertExpectations(t) +} + +func TestPublishContractAPIAlreadyPublished(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + url := "http://firefly" + api := &core.ContractAPI{ + Name: "ffi1", + Namespace: "ns1", + Published: true, + } + + ds.mcm.On("GetContractAPI", context.Background(), url, "api").Return(api, nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.Regexp(t, "FF10450", err) +} + +func TestPublishContractAPIQueryFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + url := "http://firefly" + + ds.mcm.On("GetContractAPI", context.Background(), url, "api").Return(nil, fmt.Errorf("pop")) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishContractAPIResolveFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + url := "http://firefly" + api := &core.ContractAPI{ + Name: "ffi1", + Namespace: "ns1", + Published: false, + } + + ds.mcm.On("GetContractAPI", context.Background(), url, "api").Return(api, nil) + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(fmt.Errorf("pop")) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishContractAPINonMultiparty(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = false + + url := "http://firefly" + + _, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.Regexp(t, "FF10414", err) +} + +func TestPublishContractAPINetworkNameFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + url := "http://firefly" + api := &core.ContractAPI{ + Name: "ffi1", + Namespace: "ns1", + Published: false, + } + + ds.mdi.On("GetContractAPIByNetworkName", context.Background(), "ns1", "api-shared").Return(nil, fmt.Errorf("pop")) + ds.mcm.On("GetContractAPI", context.Background(), url, "api").Return(api, nil) + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishContractAPINetworkNameConflict(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + url := "http://firefly" + api := &core.ContractAPI{ + Name: "ffi1", + Namespace: "ns1", + Published: false, + } + + ds.mdi.On("GetContractAPIByNetworkName", context.Background(), "ns1", "api-shared").Return(&core.ContractAPI{}, nil) + ds.mcm.On("GetContractAPI", context.Background(), url, "api").Return(api, nil) + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.Regexp(t, "FF10448", err) +} + +func TestPublishContractAPIInterfaceFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + url := "http://firefly" + api := &core.ContractAPI{ + Name: "ffi1", + 
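Several of the publish tests above call mockRunAsGroupPassthrough(ds.mdi) so that the code wrapped in RunAsGroup still executes against the other mock expectations. That helper is defined elsewhere in the package; a generic version of the idea, using testify's Run hook on a stand-in mock rather than the real databasemocks.Plugin, might look like this:

package definitions_example

import (
	"context"

	"github.com/stretchr/testify/mock"
)

// mockDB is a hypothetical stand-in for the generated database plugin mock.
type mockDB struct{ mock.Mock }

func (m *mockDB) RunAsGroup(ctx context.Context, fn func(ctx context.Context) error) error {
	args := m.Called(ctx, fn)
	return args.Error(0)
}

// passthroughRunAsGroup wires RunAsGroup to simply invoke the supplied
// callback and return whatever the callback returns.
func passthroughRunAsGroup(m *mockDB) {
	call := m.On("RunAsGroup", mock.Anything, mock.Anything)
	call.Run(func(a mock.Arguments) {
		fn := a[1].(func(context.Context) error)
		call.Return(fn(a[0].(context.Context)))
	})
}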
Namespace: "ns1", + Published: false, + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + ds.mcm.On("GetContractAPI", context.Background(), url, "api").Return(api, nil) + ds.mdi.On("GetContractAPIByNetworkName", context.Background(), "ns1", "api-shared").Return(nil, nil) + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + mockRunAsGroupPassthrough(ds.mdi) + ds.mdi.On("GetFFIByID", context.Background(), "ns1", api.Interface.ID).Return(nil, fmt.Errorf("pop")) + + _, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishContractAPIInterfaceNotFound(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + url := "http://firefly" + api := &core.ContractAPI{ + Name: "ffi1", + Namespace: "ns1", + Published: false, + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + ds.mcm.On("GetContractAPI", context.Background(), url, "api").Return(api, nil) + ds.mdi.On("GetContractAPIByNetworkName", context.Background(), "ns1", "api-shared").Return(nil, nil) + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + mockRunAsGroupPassthrough(ds.mdi) + ds.mdi.On("GetFFIByID", context.Background(), "ns1", api.Interface.ID).Return(nil, nil) + + _, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.Regexp(t, "FF10303", err) +} + +func TestPublishContractAPIInterfaceNotPublished(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + url := "http://firefly" + api := &core.ContractAPI{ + Name: "ffi1", + Namespace: "ns1", + Published: false, + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + ds.mcm.On("GetContractAPI", context.Background(), url, "api").Return(api, nil) + ds.mdi.On("GetContractAPIByNetworkName", context.Background(), "ns1", "api-shared").Return(nil, nil) + ds.mcm.On("ResolveContractAPI", context.Background(), url, api).Return(nil) + mockRunAsGroupPassthrough(ds.mdi) + ds.mdi.On("GetFFIByID", context.Background(), "ns1", api.Interface.ID).Return(&fftypes.FFI{ + Published: false, + }, nil) + + _, err := ds.PublishContractAPI(context.Background(), url, "api", "api-shared", false) + assert.Regexp(t, "FF10451", err) } diff --git a/internal/definitions/sender_datatype.go b/internal/definitions/sender_datatype.go index 63fbd2c80c..d34ee9e9e9 100644 --- a/internal/definitions/sender_datatype.go +++ b/internal/definitions/sender_datatype.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -25,7 +25,7 @@ import ( "github.com/hyperledger/firefly/pkg/core" ) -func (bm *definitionSender) DefineDatatype(ctx context.Context, datatype *core.Datatype, waitConfirm bool) error { +func (ds *definitionSender) DefineDatatype(ctx context.Context, datatype *core.Datatype, waitConfirm bool) error { // Validate the input data definition data datatype.ID = fftypes.NewUUID() datatype.Created = fftypes.Now() @@ -34,21 +34,21 @@ func (bm *definitionSender) DefineDatatype(ctx context.Context, datatype *core.D } datatype.Hash = datatype.Value.Hash() - if bm.multiparty { + if ds.multiparty { if err := datatype.Validate(ctx, false); err != nil { return err } // Verify the data type is now all valid, before we broadcast it - if err := bm.data.CheckDatatype(ctx, datatype); err != nil { + if err := ds.data.CheckDatatype(ctx, datatype); err != nil { return err } datatype.Namespace = "" - msg, err := bm.sendDefinitionDefault(ctx, datatype, core.SystemTagDefineDatatype, waitConfirm) + msg, err := ds.getSenderDefault(ctx, datatype, core.SystemTagDefineDatatype).send(ctx, waitConfirm) if msg != nil { datatype.Message = msg.Header.ID } - datatype.Namespace = bm.namespace + datatype.Namespace = ds.namespace return err } diff --git a/internal/definitions/sender_datatype_test.go b/internal/definitions/sender_datatype_test.go index 86cdc22377..d770a20ca9 100644 --- a/internal/definitions/sender_datatype_test.go +++ b/internal/definitions/sender_datatype_test.go @@ -22,9 +22,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/broadcastmocks" - "github.com/hyperledger/firefly/mocks/datamocks" - "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" "github.com/hyperledger/firefly/pkg/core" "github.com/stretchr/testify/assert" @@ -32,8 +29,8 @@ import ( ) func TestDefineDatatypeBadType(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true err := ds.DefineDatatype(context.Background(), &core.Datatype{ Validator: core.ValidatorType("wrong"), @@ -42,18 +39,18 @@ func TestDefineDatatypeBadType(t *testing.T) { } func TestBroadcastDatatypeBadValue(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true - mdm := ds.data.(*datamocks.Manager) - mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) - mim := ds.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", context.Background()).Return(&core.Identity{ + + ds.mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ IdentityBase: core.IdentityBase{ DID: "firefly:org1", }, }, nil) - mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + err := ds.DefineDatatype(context.Background(), &core.Datatype{ Namespace: "ns1", Name: "ent1", @@ -61,20 +58,14 @@ func TestBroadcastDatatypeBadValue(t *testing.T) { Value: fftypes.JSONAnyPtr(`!unparsable`), }, false) assert.Regexp(t, "FF10137.*value", err) - - mdm.AssertExpectations(t) - mim.AssertExpectations(t) } func TestDefineDatatypeInvalid(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + 
defer ds.cleanup(t) ds.multiparty = true - mdm := ds.data.(*datamocks.Manager) - mim := ds.identity.(*identitymanagermocks.Manager) - mim.On("ResolveInputIdentity", mock.Anything, mock.Anything).Return(nil) - mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + ds.mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) err := ds.DefineDatatype(context.Background(), &core.Datatype{ Namespace: "ns1", @@ -83,27 +74,22 @@ func TestDefineDatatypeInvalid(t *testing.T) { Value: fftypes.JSONAnyPtr(`{"some": "data"}`), }, false) assert.EqualError(t, err, "pop") - - mdm.AssertExpectations(t) } func TestBroadcastOk(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true - mdm := ds.data.(*datamocks.Manager) - mim := ds.identity.(*identitymanagermocks.Manager) - mbm := ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - mim.On("GetMultipartyRootOrg", context.Background()).Return(&core.Identity{ + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ IdentityBase: core.IdentityBase{ DID: "firefly:org1", }, }, nil) - mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) - mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) - mbm.On("NewBroadcast", mock.Anything).Return(mms) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mdm.On("CheckDatatype", mock.Anything, mock.Anything).Return(nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) mms.On("Send", context.Background()).Return(nil) err := ds.DefineDatatype(context.Background(), &core.Datatype{ @@ -114,15 +100,12 @@ func TestBroadcastOk(t *testing.T) { }, false) assert.NoError(t, err) - mdm.AssertExpectations(t) - mim.AssertExpectations(t) - mbm.AssertExpectations(t) mms.AssertExpectations(t) } func TestDefineDatatypeNonMultiparty(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = false err := ds.DefineDatatype(context.Background(), &core.Datatype{ diff --git a/internal/definitions/sender_identity.go b/internal/definitions/sender_identity.go index 04bee92a70..02986684db 100644 --- a/internal/definitions/sender_identity.go +++ b/internal/definitions/sender_identity.go @@ -25,16 +25,16 @@ import ( // ClaimIdentity is a special form of CreateDefinition where the signing identity does not need to have been pre-registered // The blockchain "key" will be normalized, but the "author" will pass through unchecked -func (bm *definitionSender) ClaimIdentity(ctx context.Context, claim *core.IdentityClaim, signingIdentity *core.SignerRef, parentSigner *core.SignerRef, waitConfirm bool) error { - if bm.multiparty { +func (ds *definitionSender) ClaimIdentity(ctx context.Context, claim *core.IdentityClaim, signingIdentity *core.SignerRef, parentSigner *core.SignerRef) error { + if ds.multiparty { var err error - signingIdentity.Key, err = bm.identity.ResolveInputSigningKey(ctx, signingIdentity.Key, identity.KeyNormalizationBlockchainPlugin) + signingIdentity.Key, err = ds.identity.ResolveInputSigningKey(ctx, signingIdentity.Key, identity.KeyNormalizationBlockchainPlugin) if err != nil { return err } claim.Identity.Namespace = "" - claimMsg, err := bm.sendDefinitionCommon(ctx, claim, signingIdentity, core.SystemTagIdentityClaim, waitConfirm) + claimMsg, err := ds.getSenderResolved(ctx, claim, 
signingIdentity, core.SystemTagIdentityClaim).send(ctx, false) if err != nil { return err } @@ -42,13 +42,13 @@ func (bm *definitionSender) ClaimIdentity(ctx context.Context, claim *core.Ident // Send the verification if one is required. if parentSigner != nil { - verifyMsg, err := bm.sendDefinition(ctx, &core.IdentityVerification{ + verifyMsg, err := ds.getSender(ctx, &core.IdentityVerification{ Claim: core.MessageRef{ ID: claimMsg.Header.ID, Hash: claimMsg.Hash, }, Identity: claim.Identity.IdentityBase, - }, parentSigner, core.SystemTagIdentityVerification, false) + }, parentSigner, core.SystemTagIdentityVerification).send(ctx, false) if err != nil { return err } @@ -58,20 +58,20 @@ func (bm *definitionSender) ClaimIdentity(ctx context.Context, claim *core.Ident return nil } - claim.Identity.Namespace = bm.namespace + claim.Identity.Namespace = ds.namespace return fakeBatch(ctx, func(ctx context.Context, state *core.BatchState) (HandlerResult, error) { - return bm.handler.handleIdentityClaim(ctx, state, &identityMsgInfo{SignerRef: *signingIdentity}, claim) + return ds.handler.handleIdentityClaim(ctx, state, &identityMsgInfo{SignerRef: *signingIdentity}, claim) }) } -func (bm *definitionSender) UpdateIdentity(ctx context.Context, identity *core.Identity, def *core.IdentityUpdate, signingIdentity *core.SignerRef, waitConfirm bool) error { - if bm.multiparty { - updateMsg, err := bm.sendDefinition(ctx, def, signingIdentity, core.SystemTagIdentityUpdate, waitConfirm) +func (ds *definitionSender) UpdateIdentity(ctx context.Context, identity *core.Identity, def *core.IdentityUpdate, signingIdentity *core.SignerRef, waitConfirm bool) error { + if ds.multiparty { + updateMsg, err := ds.getSender(ctx, def, signingIdentity, core.SystemTagIdentityUpdate).send(ctx, waitConfirm) identity.Messages.Update = updateMsg.Header.ID return err } return fakeBatch(ctx, func(ctx context.Context, state *core.BatchState) (HandlerResult, error) { - return bm.handler.handleIdentityUpdate(ctx, state, &identityUpdateMsgInfo{}, def) + return ds.handler.handleIdentityUpdate(ctx, state, &identityUpdateMsgInfo{}, def) }) } diff --git a/internal/definitions/sender_identity_test.go b/internal/definitions/sender_identity_test.go index 4e035ee300..4564de6d29 100644 --- a/internal/definitions/sender_identity_test.go +++ b/internal/definitions/sender_identity_test.go @@ -21,8 +21,6 @@ import ( "testing" "github.com/hyperledger/firefly/internal/identity" - "github.com/hyperledger/firefly/mocks/broadcastmocks" - "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" "github.com/hyperledger/firefly/pkg/core" "github.com/stretchr/testify/assert" @@ -30,16 +28,14 @@ import ( ) func TestClaimIdentity(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) - mim := ds.identity.(*identitymanagermocks.Manager) - mbm := ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", nil) - mbm.On("NewBroadcast", mock.Anything).Return(mms) - mms.On("SendAndWait", mock.Anything).Return(nil) + ds.mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Send", mock.Anything).Return(nil) ds.multiparty = true @@ -47,25 +43,19 @@ func TestClaimIdentity(t *testing.T) { 
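The reworked ClaimIdentity above keeps the two-step shape: the claim is broadcast first (now always asynchronously via Send), and for a child identity a parent-signed verification is broadcast second, referencing the claim message by ID and hash, which is why the claim must be sent before the verification can even be built. A stripped-down illustration with stand-in types (not the FireFly structs):

package definitions_example

import "context"

// msgRef carries the identifiers the verification needs from the claim.
type msgRef struct {
	ID   string
	Hash string
}

// broadcaster is a stand-in for the definition sender plus a signing identity.
type broadcaster interface {
	// Send broadcasts a payload and returns the reference of the message sent.
	Send(ctx context.Context, payload any) (msgRef, error)
}

// claimWithVerification sends the claim, then uses the returned reference to
// build and send the parent-signed verification. The ordering matters: the
// verification embeds the claim message's ID and hash.
func claimWithVerification(ctx context.Context, hasParent bool, child, parent broadcaster) error {
	claimRef, err := child.Send(ctx, "identity-claim")
	if err != nil {
		return err
	}
	if hasParent {
		verification := map[string]msgRef{"claim": claimRef}
		_, err = parent.Send(ctx, verification)
	}
	return err
}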
Identity: &core.Identity{}, }, &core.SignerRef{ Key: "0x1234", - }, nil, true) + }, nil) assert.NoError(t, err) - - mim.AssertExpectations(t) - mbm.AssertExpectations(t) - mms.AssertExpectations(t) } func TestClaimIdentityFail(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) - mim := ds.identity.(*identitymanagermocks.Manager) - mbm := ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", nil) - mbm.On("NewBroadcast", mock.Anything).Return(mms) - mms.On("SendAndWait", mock.Anything).Return(fmt.Errorf("pop")) + ds.mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Send", mock.Anything).Return(fmt.Errorf("pop")) ds.multiparty = true @@ -73,21 +63,17 @@ func TestClaimIdentityFail(t *testing.T) { Identity: &core.Identity{}, }, &core.SignerRef{ Key: "0x1234", - }, nil, true) + }, nil) assert.EqualError(t, err, "pop") - mim.AssertExpectations(t) - mbm.AssertExpectations(t) mms.AssertExpectations(t) } func TestClaimIdentityFailKey(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) - mim := ds.identity.(*identitymanagermocks.Manager) - - mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", fmt.Errorf("pop")) + ds.mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", fmt.Errorf("pop")) ds.multiparty = true @@ -95,27 +81,23 @@ func TestClaimIdentityFailKey(t *testing.T) { Identity: &core.Identity{}, }, &core.SignerRef{ Key: "0x1234", - }, nil, true) + }, nil) assert.EqualError(t, err, "pop") - - mim.AssertExpectations(t) } func TestClaimIdentityChild(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) - mim := ds.identity.(*identitymanagermocks.Manager) - mbm := ds.broadcast.(*broadcastmocks.Manager) mms1 := &syncasyncmocks.Sender{} mms2 := &syncasyncmocks.Sender{} - mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", nil) - mbm.On("NewBroadcast", mock.Anything).Return(mms1).Once() - mbm.On("NewBroadcast", mock.Anything).Return(mms2).Once() - mms1.On("SendAndWait", mock.Anything).Return(nil) + ds.mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms1).Once() + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms2).Once() + mms1.On("Send", mock.Anything).Return(nil) mms2.On("Send", mock.Anything).Return(nil) - mim.On("ResolveInputSigningIdentity", mock.Anything, mock.MatchedBy(func(signer *core.SignerRef) bool { + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.MatchedBy(func(signer *core.SignerRef) bool { return signer.Key == "0x2345" })).Return(nil) @@ -127,29 +109,25 @@ func TestClaimIdentityChild(t *testing.T) { Key: "0x1234", }, &core.SignerRef{ Key: "0x2345", - }, true) + }) assert.NoError(t, err) - mim.AssertExpectations(t) - mbm.AssertExpectations(t) mms1.AssertExpectations(t) } func TestClaimIdentityChildFail(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := 
newTestDefinitionSender(t) + defer ds.cleanup(t) - mim := ds.identity.(*identitymanagermocks.Manager) - mbm := ds.broadcast.(*broadcastmocks.Manager) mms1 := &syncasyncmocks.Sender{} mms2 := &syncasyncmocks.Sender{} - mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", nil) - mbm.On("NewBroadcast", mock.Anything).Return(mms1).Once() - mbm.On("NewBroadcast", mock.Anything).Return(mms2).Once() - mms1.On("SendAndWait", mock.Anything).Return(nil) + ds.mim.On("ResolveInputSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms1).Once() + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms2).Once() + mms1.On("Send", mock.Anything).Return(nil) mms2.On("Send", mock.Anything).Return(fmt.Errorf("pop")) - mim.On("ResolveInputSigningIdentity", mock.Anything, mock.MatchedBy(func(signer *core.SignerRef) bool { + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.MatchedBy(func(signer *core.SignerRef) bool { return signer.Key == "0x2345" })).Return(nil) @@ -161,21 +139,17 @@ func TestClaimIdentityChildFail(t *testing.T) { Key: "0x1234", }, &core.SignerRef{ Key: "0x2345", - }, true) + }) assert.EqualError(t, err, "pop") - mim.AssertExpectations(t) - mbm.AssertExpectations(t) mms1.AssertExpectations(t) } func TestClaimIdentityNonMultiparty(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() - dh := ds.handler + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) - mim := dh.identity.(*identitymanagermocks.Manager) - mim.On("VerifyIdentityChain", mock.Anything, mock.AnythingOfType("*core.Identity")).Return(nil, false, fmt.Errorf("pop")) + ds.mim.On("VerifyIdentityChain", mock.Anything, mock.AnythingOfType("*core.Identity")).Return(nil, false, fmt.Errorf("pop")) ds.multiparty = false @@ -183,23 +157,19 @@ func TestClaimIdentityNonMultiparty(t *testing.T) { Identity: &core.Identity{}, }, &core.SignerRef{ Key: "0x1234", - }, nil, true) + }, nil) assert.NoError(t, err) - - mim.AssertExpectations(t) } func TestUpdateIdentity(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) - mim := ds.identity.(*identitymanagermocks.Manager) - mbm := ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - mbm.On("NewBroadcast", mock.Anything).Return(mms) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) mms.On("Send", mock.Anything).Return(nil) - mim.On("ResolveInputSigningIdentity", mock.Anything, mock.MatchedBy(func(signer *core.SignerRef) bool { + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.MatchedBy(func(signer *core.SignerRef) bool { return signer.Key == "0x1234" })).Return(nil) @@ -213,14 +183,12 @@ func TestUpdateIdentity(t *testing.T) { }, false) assert.NoError(t, err) - mim.AssertExpectations(t) - mbm.AssertExpectations(t) mms.AssertExpectations(t) } func TestUpdateIdentityNonMultiparty(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = false diff --git a/internal/definitions/sender_test.go b/internal/definitions/sender_test.go index 3adce3c312..b7a8f41499 100644 --- a/internal/definitions/sender_test.go +++ b/internal/definitions/sender_test.go @@ -35,7 +35,33 @@ import ( "github.com/stretchr/testify/mock" ) -func newTestDefinitionSender(t *testing.T) (*definitionSender, func()) { +type testDefinitionSender struct 
{ + definitionSender + + cancel func() + mdi *databasemocks.Plugin + mbi *blockchainmocks.Plugin + mdx *dataexchangemocks.Plugin + mbm *broadcastmocks.Manager + mim *identitymanagermocks.Manager + mdm *datamocks.Manager + mam *assetmocks.Manager + mcm *contractmocks.Manager +} + +func (tds *testDefinitionSender) cleanup(t *testing.T) { + tds.cancel() + tds.mdi.AssertExpectations(t) + tds.mbi.AssertExpectations(t) + tds.mdx.AssertExpectations(t) + tds.mbm.AssertExpectations(t) + tds.mim.AssertExpectations(t) + tds.mdm.AssertExpectations(t) + tds.mam.AssertExpectations(t) + tds.mcm.AssertExpectations(t) +} + +func newTestDefinitionSender(t *testing.T) *testDefinitionSender { mdi := &databasemocks.Plugin{} mbi := &blockchainmocks.Plugin{} mdx := &dataexchangemocks.Plugin{} @@ -51,7 +77,27 @@ func newTestDefinitionSender(t *testing.T) (*definitionSender, func()) { ns := &core.Namespace{Name: "ns1", NetworkName: "ns1"} ds, _, err := NewDefinitionSender(ctx, ns, false, mdi, mbi, mdx, mbm, mim, mdm, mam, mcm, tokenBroadcastNames) assert.NoError(t, err) - return ds.(*definitionSender), cancel + + return &testDefinitionSender{ + definitionSender: *ds.(*definitionSender), + cancel: cancel, + mdi: mdi, + mbi: mbi, + mdx: mdx, + mbm: mbm, + mim: mim, + mdm: mdm, + mam: mam, + mcm: mcm, + } +} + +func mockRunAsGroupPassthrough(mdi *databasemocks.Plugin) { + rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything) + rag.RunFn = func(a mock.Arguments) { + fn := a[1].(func(context.Context) error) + rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} + } } func TestInitSenderFail(t *testing.T) { @@ -60,20 +106,20 @@ func TestInitSenderFail(t *testing.T) { } func TestName(t *testing.T) { - bm, cancel := newTestDefinitionSender(t) - defer cancel() - assert.Equal(t, "DefinitionSender", bm.Name()) + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + assert.Equal(t, "DefinitionSender", ds.Name()) } func TestCreateDefinitionConfirm(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) mim := ds.identity.(*identitymanagermocks.Manager) mbm := ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - mim.On("GetMultipartyRootOrg", ds.ctx).Return(&core.Identity{ + mim.On("GetRootOrg", ds.ctx).Return(&core.Identity{ IdentityBase: core.IdentityBase{ DID: "firefly:org1", }, @@ -83,7 +129,7 @@ func TestCreateDefinitionConfirm(t *testing.T) { mms.On("SendAndWait", mock.Anything).Return(nil) ds.multiparty = true - _, err := ds.sendDefinitionDefault(ds.ctx, &core.Datatype{}, core.SystemTagDefineDatatype, true) + _, err := ds.getSenderDefault(ds.ctx, &core.Datatype{}, core.SystemTagDefineDatatype).send(ds.ctx, true) assert.NoError(t, err) mim.AssertExpectations(t) @@ -92,14 +138,14 @@ func TestCreateDefinitionConfirm(t *testing.T) { } func TestCreateDatatypeDefinitionAsNodeConfirm(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) mim := ds.identity.(*identitymanagermocks.Manager) mbm := ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - mim.On("GetMultipartyRootOrg", ds.ctx).Return(&core.Identity{ + mim.On("GetRootOrg", ds.ctx).Return(&core.Identity{ IdentityBase: core.IdentityBase{ DID: "firefly:org1", }, @@ -110,7 +156,7 @@ func TestCreateDatatypeDefinitionAsNodeConfirm(t *testing.T) { ds.multiparty = true - _, err := ds.sendDefinitionDefault(ds.ctx, &core.Datatype{}, core.SystemTagDefineDatatype, true) + 
_, err := ds.getSenderDefault(ds.ctx, &core.Datatype{}, core.SystemTagDefineDatatype).send(ds.ctx, true) assert.NoError(t, err) mim.AssertExpectations(t) @@ -119,16 +165,16 @@ func TestCreateDatatypeDefinitionAsNodeConfirm(t *testing.T) { } func TestCreateDefinitionBadIdentity(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true mim := ds.identity.(*identitymanagermocks.Manager) mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - _, err := ds.sendDefinition(ds.ctx, &core.Datatype{}, &core.SignerRef{ + _, err := ds.getSender(ds.ctx, &core.Datatype{}, &core.SignerRef{ Author: "wrong", Key: "wrong", - }, core.SystemTagDefineDatatype, false) + }, core.SystemTagDefineDatatype).send(ds.ctx, false) assert.Regexp(t, "pop", err) } diff --git a/internal/definitions/sender_tokenpool.go b/internal/definitions/sender_tokenpool.go index b3df2b5a56..1b02783c51 100644 --- a/internal/definitions/sender_tokenpool.go +++ b/internal/definitions/sender_tokenpool.go @@ -26,32 +26,101 @@ import ( "github.com/hyperledger/firefly/pkg/core" ) -func (bm *definitionSender) DefineTokenPool(ctx context.Context, pool *core.TokenPoolAnnouncement, waitConfirm bool) error { - - if bm.multiparty { - // Map token connector name -> broadcast name - if broadcastName, exists := bm.tokenBroadcastNames[pool.Pool.Connector]; exists { - pool.Pool.Connector = broadcastName - } else { - log.L(ctx).Infof("Could not find broadcast name for token connector: %s", pool.Pool.Connector) - return i18n.NewError(ctx, coremsgs.MsgInvalidConnectorName, broadcastName, "token") - } +func (ds *definitionSender) PublishTokenPool(ctx context.Context, poolNameOrID, networkName string, waitConfirm bool) (pool *core.TokenPool, err error) { + if !ds.multiparty { + return nil, i18n.NewError(ctx, coremsgs.MsgActionNotSupported) + } - if err := pool.Pool.Validate(ctx); err != nil { + var sender *sendWrapper + err = ds.database.RunAsGroup(ctx, func(ctx context.Context) error { + if pool, err = ds.assets.GetTokenPoolByNameOrID(ctx, poolNameOrID); err != nil { return err } + if pool.Published { + return i18n.NewError(ctx, coremsgs.MsgAlreadyPublished) + } + pool.NetworkName = networkName + sender = ds.getTokenPoolSender(ctx, pool) + if sender.err != nil { + return sender.err + } + return sender.sender.Prepare(ctx) + }) + if err != nil { + return nil, err + } + + _, err = sender.send(ctx, waitConfirm) + return pool, err +} + +func (ds *definitionSender) getTokenPoolSender(ctx context.Context, pool *core.TokenPool) *sendWrapper { + // Map token connector name -> broadcast name + if broadcastName, exists := ds.tokenBroadcastNames[pool.Connector]; exists { + pool.Connector = broadcastName + } else { + log.L(ctx).Infof("Could not find broadcast name for token connector: %s", pool.Connector) + return wrapSendError(i18n.NewError(ctx, coremsgs.MsgInvalidConnectorName, broadcastName, "token")) + } + + if pool.NetworkName == "" { + pool.NetworkName = pool.Name + } - pool.Pool.Namespace = "" - msg, err := bm.sendDefinitionDefault(ctx, pool, core.SystemTagDefinePool, waitConfirm) - if msg != nil { - pool.Pool.Message = msg.Header.ID + // Validate the pool before sending + if err := pool.Validate(ctx); err != nil { + return wrapSendError(err) + } + existing, err := ds.database.GetTokenPoolByNetworkName(ctx, ds.namespace, pool.NetworkName) + if err != nil { + return wrapSendError(err) + } else if existing != nil { + return 
wrapSendError(i18n.NewError(ctx, coremsgs.MsgNetworkNameExists)) + } + if pool.Interface != nil && pool.Interface.ID != nil { + iface, err := ds.database.GetFFIByID(ctx, ds.namespace, pool.Interface.ID) + switch { + case err != nil: + return wrapSendError(err) + case iface == nil: + return wrapSendError(i18n.NewError(ctx, coremsgs.MsgContractInterfaceNotFound, pool.Interface.ID)) + case !iface.Published: + return wrapSendError(i18n.NewError(ctx, coremsgs.MsgContractInterfaceNotPublished, pool.Interface.ID)) } - pool.Pool.Namespace = bm.namespace + } + + // Prepare the pool definition to be serialized for broadcast + localName := pool.Name + pool.Name = "" + pool.Namespace = "" + pool.Published = true + pool.Active = false + definition := &core.TokenPoolDefinition{Pool: pool} + + sender := ds.getSenderDefault(ctx, definition, core.SystemTagDefinePool) + if sender.message != nil { + pool.Message = sender.message.Header.ID + } + + pool.Name = localName + pool.Namespace = ds.namespace + pool.Active = true + return sender +} + +func (ds *definitionSender) DefineTokenPool(ctx context.Context, pool *core.TokenPool, waitConfirm bool) error { + if pool.Published { + if !ds.multiparty { + return i18n.NewError(ctx, coremsgs.MsgActionNotSupported) + } + _, err := ds.getTokenPoolSender(ctx, pool).send(ctx, waitConfirm) return err } + pool.NetworkName = "" + return fakeBatch(ctx, func(ctx context.Context, state *core.BatchState) (HandlerResult, error) { - hr, err := bm.handler.handleTokenPoolDefinition(ctx, state, pool.Pool) + hr, err := ds.handler.handleTokenPoolDefinition(ctx, state, pool, true) if err != nil { if innerErr := errors.Unwrap(err); innerErr != nil { return hr, innerErr diff --git a/internal/definitions/sender_tokenpool_test.go b/internal/definitions/sender_tokenpool_test.go index 6842bcac56..8e15bfdb32 100644 --- a/internal/definitions/sender_tokenpool_test.go +++ b/internal/definitions/sender_tokenpool_test.go @@ -22,10 +22,6 @@ import ( "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" - "github.com/hyperledger/firefly/mocks/broadcastmocks" - "github.com/hyperledger/firefly/mocks/databasemocks" - "github.com/hyperledger/firefly/mocks/datamocks" - "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" "github.com/hyperledger/firefly/pkg/core" "github.com/stretchr/testify/assert" @@ -33,126 +29,121 @@ import ( ) func TestBroadcastTokenPoolInvalid(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true - mdm := ds.data.(*datamocks.Manager) - - pool := &core.TokenPoolAnnouncement{ - Pool: &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: "", - Name: "", - Type: core.TokenTypeNonFungible, - Locator: "N1", - Symbol: "COIN", - }, + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "", + Name: "", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Published: true, } err := ds.DefineTokenPool(context.Background(), pool, false) assert.Regexp(t, "FF10420", err) - - mdm.AssertExpectations(t) } func TestBroadcastTokenPoolInvalidNonMultiparty(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = false - mdm := ds.data.(*datamocks.Manager) - - pool := &core.TokenPoolAnnouncement{ - Pool: &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: "", - Name: "", - Type: core.TokenTypeNonFungible, - Locator: 
"N1", - Symbol: "COIN", - }, + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "", + Name: "", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Published: false, } err := ds.DefineTokenPool(context.Background(), pool, false) assert.Regexp(t, "FF00140", err) +} + +func TestBroadcastTokenPoolPublishNonMultiparty(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = false - mdm.AssertExpectations(t) + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "", + Name: "", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Published: true, + } + + err := ds.DefineTokenPool(context.Background(), pool, false) + assert.Regexp(t, "FF10414", err) } func TestBroadcastTokenPoolInvalidNameMultiparty(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true - mdm := ds.data.(*datamocks.Manager) - - pool := &core.TokenPoolAnnouncement{ - Pool: &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: "", - Name: "", - Type: core.TokenTypeNonFungible, - Locator: "N1", - Symbol: "COIN", - Connector: "connector1", - }, + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "", + Name: "", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: true, } err := ds.DefineTokenPool(context.Background(), pool, false) assert.Regexp(t, "FF00140", err) - - mdm.AssertExpectations(t) } func TestDefineTokenPoolOk(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true - mdm := ds.data.(*datamocks.Manager) - mim := ds.identity.(*identitymanagermocks.Manager) - mbm := ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - pool := &core.TokenPoolAnnouncement{ - Pool: &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: "ns1", - Name: "mypool", - Type: core.TokenTypeNonFungible, - Locator: "N1", - Symbol: "COIN", - Connector: "connector1", - }, + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "mypool", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: true, } - mim.On("GetMultipartyRootOrg", ds.ctx).Return(&core.Identity{ + ds.mim.On("GetRootOrg", ds.ctx).Return(&core.Identity{ IdentityBase: core.IdentityBase{ DID: "firefly:org1", }, }, nil) - mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) - mbm.On("NewBroadcast", mock.Anything).Return(mms) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) mms.On("Send", ds.ctx).Return(nil) + ds.mdi.On("GetTokenPoolByNetworkName", ds.ctx, "ns1", "mypool").Return(nil, nil) err := ds.DefineTokenPool(ds.ctx, pool, false) assert.NoError(t, err) - mdm.AssertExpectations(t) - mim.AssertExpectations(t) - mbm.AssertExpectations(t) mms.AssertExpectations(t) } func TestDefineTokenPoolkONonMultiparty(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = false - mdm := ds.data.(*datamocks.Manager) - mdb := ds.database.(*databasemocks.Plugin) - pool := &core.TokenPool{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -161,75 +152,428 @@ func TestDefineTokenPoolkONonMultiparty(t *testing.T) { Locator: "N1", Symbol: "COIN", Connector: 
"connector1", - State: core.TokenPoolStateConfirmed, - } - poolAnnouncement := &core.TokenPoolAnnouncement{ - Pool: pool, + Active: true, + Published: false, } - mdb.On("GetTokenPoolByID", mock.Anything, mock.Anything, mock.Anything).Return(pool, nil) + ds.mdi.On("InsertOrGetTokenPool", mock.Anything, pool).Return(nil, nil) + ds.mam.On("ActivateTokenPool", mock.Anything, pool).Return(nil) - err := ds.DefineTokenPool(context.Background(), poolAnnouncement, false) + err := ds.DefineTokenPool(context.Background(), pool, false) assert.NoError(t, err) - - mdm.AssertExpectations(t) - mdb.AssertExpectations(t) } func TestDefineTokenPoolNonMultipartyTokenPoolFail(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() - - mdm := ds.data.(*datamocks.Manager) - mbm := ds.broadcast.(*broadcastmocks.Manager) - mdi := ds.database.(*databasemocks.Plugin) - - pool := &core.TokenPoolAnnouncement{ - Pool: &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: "ns1", - Name: "mypool", - Type: core.TokenTypeNonFungible, - Locator: "N1", - Symbol: "COIN", - Connector: "connector1", - }, + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "mypool", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, } - mdi.On("GetTokenPoolByID", context.Background(), "ns1", pool.Pool.ID).Return(nil, fmt.Errorf("pop")) + ds.mdi.On("InsertOrGetTokenPool", mock.Anything, pool).Return(nil, fmt.Errorf("pop")) err := ds.DefineTokenPool(context.Background(), pool, false) assert.Regexp(t, "pop", err) +} + +func TestDefineTokenPoolNonMultipartyTokenPoolFailInner(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "mypool", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + } + + ds.mdi.On("InsertOrGetTokenPool", mock.Anything, pool).Return(nil, fmt.Errorf("error2: [%w]", fmt.Errorf("pop"))) - mdm.AssertExpectations(t) - mbm.AssertExpectations(t) + err := ds.DefineTokenPool(context.Background(), pool, false) + assert.Regexp(t, "pop", err) } func TestDefineTokenPoolBadName(t *testing.T) { - ds, cancel := newTestDefinitionSender(t) - defer cancel() + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "///bad/////", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + } + + err := ds.DefineTokenPool(context.Background(), pool, false) + assert.Regexp(t, "FF00140", err) +} + +func TestPublishTokenPool(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) ds.multiparty = true - mim := ds.identity.(*identitymanagermocks.Manager) - mbm := ds.broadcast.(*broadcastmocks.Manager) mms := &syncasyncmocks.Sender{} - pool := &core.TokenPoolAnnouncement{ - Pool: &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: "ns1", - Name: "///bad/////", - Type: core.TokenTypeNonFungible, - Locator: "N1", - Symbol: "COIN", - Connector: "connector1", - }, + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, } - mim.On("ResolveInputSigningIdentity", mock.Anything, 
mock.Anything).Return(nil) - mbm.On("NewBroadcast", mock.Anything).Return(mms) + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ + IdentityBase: core.IdentityBase{ + DID: "firefly:org1", + }, + }, nil) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Prepare", context.Background()).Return(nil) mms.On("Send", context.Background()).Return(nil) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(nil, nil) + mockRunAsGroupPassthrough(ds.mdi) - err := ds.DefineTokenPool(context.Background(), pool, false) - assert.Regexp(t, "FF00140", err) + result, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.NoError(t, err) + assert.Equal(t, pool, result) + assert.True(t, pool.Published) + + mms.AssertExpectations(t) +} + +func TestPublishTokenPoolNonMultiparty(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = false + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.Regexp(t, "FF10414", err) +} + +func TestPublishTokenPoolAlreadyPublished(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: true, + } + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.Regexp(t, "FF10450", err) +} + +func TestPublishTokenPoolQueryFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(nil, fmt.Errorf("pop")) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishTokenPoolNetworkNameError(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + } + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(nil, fmt.Errorf("pop")) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishTokenPoolNetworkNameConflict(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + } + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(&core.TokenPool{}, nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := 
ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.Regexp(t, "FF10448", err) +} + +func TestPublishTokenPoolResolveFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + } + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + ds.mim.On("GetRootOrg", context.Background()).Return(nil, fmt.Errorf("pop")) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(nil, nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishTokenPoolPrepareFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + mms := &syncasyncmocks.Sender{} + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + } + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ + IdentityBase: core.IdentityBase{ + DID: "firefly:org1", + }, + }, nil) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Prepare", context.Background()).Return(fmt.Errorf("pop")) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(nil, nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.EqualError(t, err, "pop") + + mms.AssertExpectations(t) +} + +func TestPublishTokenPoolSendFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + mms := &syncasyncmocks.Sender{} + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + } + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ + IdentityBase: core.IdentityBase{ + DID: "firefly:org1", + }, + }, nil) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Prepare", context.Background()).Return(nil) + mms.On("Send", context.Background()).Return(fmt.Errorf("pop")) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(nil, nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.EqualError(t, err, "pop") + + mms.AssertExpectations(t) +} + +func TestPublishTokenPoolConfirm(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + mms := &syncasyncmocks.Sender{} + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + } + + ds.mam.On("GetTokenPoolByNameOrID", 
mock.Anything, "pool1").Return(pool, nil) + ds.mim.On("GetRootOrg", context.Background()).Return(&core.Identity{ + IdentityBase: core.IdentityBase{ + DID: "firefly:org1", + }, + }, nil) + ds.mim.On("ResolveInputSigningIdentity", mock.Anything, mock.Anything).Return(nil) + ds.mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Prepare", context.Background()).Return(nil) + mms.On("SendAndWait", context.Background()).Return(nil) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(nil, nil) + mockRunAsGroupPassthrough(ds.mdi) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", true) + assert.NoError(t, err) + assert.True(t, pool.Published) + + mms.AssertExpectations(t) +} + +func TestPublishTokenPoolInterfaceFail(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(nil, nil) + mockRunAsGroupPassthrough(ds.mdi) + ds.mdi.On("GetFFIByID", context.Background(), "ns1", pool.Interface.ID).Return(nil, fmt.Errorf("pop")) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.EqualError(t, err, "pop") +} + +func TestPublishTokenPoolInterfaceNotFound(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(nil, nil) + mockRunAsGroupPassthrough(ds.mdi) + ds.mdi.On("GetFFIByID", context.Background(), "ns1", pool.Interface.ID).Return(nil, nil) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.Regexp(t, "FF10303", err) +} + +func TestPublishTokenPoolInterfaceNotPublished(t *testing.T) { + ds := newTestDefinitionSender(t) + defer ds.cleanup(t) + ds.multiparty = true + + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "pool1", + Type: core.TokenTypeNonFungible, + Locator: "N1", + Symbol: "COIN", + Connector: "connector1", + Published: false, + Interface: &fftypes.FFIReference{ + ID: fftypes.NewUUID(), + }, + } + + ds.mam.On("GetTokenPoolByNameOrID", mock.Anything, "pool1").Return(pool, nil) + ds.mdi.On("GetTokenPoolByNetworkName", mock.Anything, "ns1", "pool-shared").Return(nil, nil) + mockRunAsGroupPassthrough(ds.mdi) + ds.mdi.On("GetFFIByID", context.Background(), "ns1", pool.Interface.ID).Return(&fftypes.FFI{ + Published: false, + }, nil) + + _, err := ds.PublishTokenPool(context.Background(), "pool1", "pool-shared", false) + assert.Regexp(t, "FF10451", err) } diff --git a/internal/events/aggregator.go b/internal/events/aggregator.go index 1880ec9b83..967dd315b3 100644 --- a/internal/events/aggregator.go +++ b/internal/events/aggregator.go @@ -26,10 +26,12 @@ import ( 
"github.com/hyperledger/firefly-common/pkg/config" "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly-common/pkg/log" "github.com/hyperledger/firefly-common/pkg/retry" "github.com/hyperledger/firefly/internal/cache" "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/definitions" "github.com/hyperledger/firefly/internal/identity" @@ -415,8 +417,7 @@ func (ag *aggregator) checkOnchainConsistency(ctx context.Context, msg *core.Mes } if msg.Header.Key == "" || msg.Header.Key != pin.Signer { - l.Errorf("Invalid message '%s'. Key '%s' does not match the signer of the pin: %s", msg.Header.ID, msg.Header.Key, pin.Signer) - return core.ActionReject, nil // This is not retryable. Reject this message + return core.ActionReject, i18n.NewError(ctx, coremsgs.MsgInvalidMessageSigner, msg.Header.ID, msg.Header.Key, pin.Signer) } // Verify that we can resolve the signing key back to the identity that is claimed in the batch @@ -449,9 +450,7 @@ func (ag *aggregator) checkOnchainConsistency(ctx context.Context, msg *core.Mes } } if msg.Header.Author == "" || resolvedAuthor.DID != msg.Header.Author { - l.Errorf("Invalid message '%s'. Author '%s' does not match identity registered to %s: %s (%s)", msg.Header.ID, msg.Header.Author, verifierRef.Value, resolvedAuthor.DID, resolvedAuthor.ID) - return core.ActionReject, nil // This is not retryable. Reject this message - + return core.ActionReject, i18n.NewError(ctx, coremsgs.MsgInvalidMessageIdentity, msg.Header.ID, msg.Header.Author, verifierRef.Value, resolvedAuthor.DID, resolvedAuthor.ID) } return core.ActionConfirm, nil } @@ -481,7 +480,7 @@ func (ag *aggregator) processMessage(ctx context.Context, manifest *core.BatchMa default: // Check the pin signer is valid for the message action, err = ag.checkOnchainConsistency(ctx, msg, pin) - if action != core.ActionConfirm { + if action == core.ActionWait || action == core.ActionRetry { break } @@ -524,8 +523,10 @@ func (ag *aggregator) processMessage(ctx context.Context, manifest *core.BatchMa } } - l.Debugf("Attempt dispatch msg=%s broadcastContexts=%v privatePins=%v", msg.Header.ID, unmaskedContexts, msg.Pins) - action, correlator, err = ag.readyForDispatch(ctx, msg, data, manifest.TX.ID, state, pin) + if action == core.ActionConfirm { + l.Debugf("Attempt dispatch msg=%s broadcastContexts=%v privatePins=%v", msg.Header.ID, unmaskedContexts, msg.Pins) + action, correlator, err = ag.readyForDispatch(ctx, msg, data, manifest.TX.ID, state, pin) + } } if action == core.ActionRetry { @@ -538,6 +539,11 @@ func (ag *aggregator) processMessage(ctx context.Context, manifest *core.BatchMa return nil } + if action == core.ActionReject && err != nil { + log.L(ctx).Warnf("Message '%s' rejected: %s", msg.Header.ID, err) + msg.RejectReason = err.Error() + } + newState := ag.completeDispatch(action, correlator, msg, manifest.TX.ID, state) // Mark all message pins dispatched, and increment all nextPins @@ -616,15 +622,12 @@ func (ag *aggregator) readyForDispatch(ctx context.Context, msg *core.Message, d var handlerResult definitions.HandlerResult handlerResult, err = ag.definitions.HandleDefinitionBroadcast(ctx, &state.BatchState, msg, data, tx) log.L(ctx).Infof("Result of definition broadcast '%s' [%s]: %s", msg.Header.Tag, msg.Header.ID, 
handlerResult.Action) - if handlerResult.Action == core.ActionReject { - log.L(ctx).Infof("Definition broadcast '%s' rejected: %s", msg.Header.ID, err) - err = nil - } correlator = handlerResult.CustomCorrelator action = handlerResult.Action case msg.Header.Type == core.MessageTypeGroupInit: - // Already handled as part of resolving the context - do nothing. + // Already handled as part of resolving the context + action = core.ActionConfirm case len(msg.Data) > 0: var valid bool diff --git a/internal/events/aggregator_batch_state.go b/internal/events/aggregator_batch_state.go index ce072cb86c..710690c2b9 100644 --- a/internal/events/aggregator_batch_state.go +++ b/internal/events/aggregator_batch_state.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -76,6 +76,7 @@ type dispatchedMessage struct { topicCount int msgPins fftypes.FFStringArray newState core.MessageState + rejectReason string } // batchState is the object that tracks the in-memory state that builds up while processing a batch of pins, @@ -203,6 +204,7 @@ func (bs *batchState) markMessageDispatched(batchID *fftypes.UUID, msg *core.Mes topicCount: len(msg.Header.Topics), msgPins: msg.Pins, newState: newState, + rejectReason: msg.RejectReason, }) } @@ -217,6 +219,21 @@ func (bs *batchState) SetContextBlockedBy(ctx context.Context, unmaskedContext f } } +func (bs *batchState) confirmMessages(ctx context.Context, msgIDs []*fftypes.UUID, msgState core.MessageState, confirmTime *fftypes.FFTime, rejectReason string) error { + values := make([]driver.Value, len(msgIDs)) + for i, msgID := range msgIDs { + bs.data.UpdateMessageStateIfCached(ctx, msgID, msgState, confirmTime, rejectReason) + values[i] = msgID + } + fb := database.MessageQueryFactory.NewFilter(ctx) + filter := fb.In("id", values) + setConfirmed := database.MessageQueryFactory.NewUpdate(ctx). + Set("confirmed", confirmTime). + Set("state", msgState). + Set("rejectreason", rejectReason) + return bs.database.UpdateMessages(ctx, bs.namespace, filter, setConfirmed) +} + func (bs *batchState) flushPins(ctx context.Context) error { l := log.L(ctx) @@ -238,6 +255,10 @@ func (bs *batchState) flushPins(ctx context.Context) error { } } + // All messages get the same confirmed timestamp + // The Events (not Messages directly) should be used for confirm sequence + confirmTime := fftypes.Now() + // Update all the pins that have been dispatched // It's important we don't re-process the message, so we update all pins for a message to dispatched in one go, // using the index range of pins it owns within the batch it is a part of. 
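// Illustrative sketch of the confirmMessages/flushPins change above: dispatched messages are
// grouped by their new state so one bulk database update is issued per state, while rejected
// messages are flushed individually so each keeps its own reject reason. The types and the
// bulkUpdate helper below are hypothetical stand-ins, not the real FireFly APIs.
package batchstatesketch

import "time"

type msgState string

const (
	stateConfirmed msgState = "confirmed"
	stateRejected  msgState = "rejected"
)

type dispatched struct {
	msgID        string
	newState     msgState
	rejectReason string
}

// bulkUpdate stands in for the single filtered UPDATE issued per state in the real code.
type bulkUpdate func(ids []string, state msgState, confirmed time.Time, rejectReason string) error

func flushDispatched(all []dispatched, update bulkUpdate) error {
	confirmTime := time.Now() // all messages in one flush share the same confirmed timestamp
	byState := make(map[msgState][]string)
	for _, d := range all {
		if d.newState == stateRejected {
			// rejected messages carry a per-message reason, so they are updated one at a time
			if err := update([]string{d.msgID}, d.newState, confirmTime, d.rejectReason); err != nil {
				return err
			}
			continue
		}
		byState[d.newState] = append(byState[d.newState], d.msgID)
	}
	for state, ids := range byState {
		// one bulk update per target state, with an empty reject reason
		if err := update(ids, state, confirmTime, ""); err != nil {
			return err
		}
	}
	return nil
}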
@@ -254,7 +275,14 @@ func (bs *batchState) flushPins(ctx context.Context) error { if len(batchDispatched) > 0 { pinsDispatched[*dm.batchID] = batchDispatched } - msgStateUpdates[dm.newState] = append(msgStateUpdates[dm.newState], dm.msgID) + + if dm.newState == core.MessageStateRejected { + if err := bs.confirmMessages(ctx, []*fftypes.UUID{dm.msgID}, dm.newState, confirmTime, dm.rejectReason); err != nil { + return err + } + } else { + msgStateUpdates[dm.newState] = append(msgStateUpdates[dm.newState], dm.msgID) + } } // Build one uber update for DB efficiency @@ -274,19 +302,8 @@ func (bs *batchState) flushPins(ctx context.Context) error { } // Also do the same for each type of state update, to mark messages dispatched with a new state - confirmTime := fftypes.Now() // All messages get the same confirmed timestamp the Events (not Messages directly) should be used for confirm sequence for msgState, msgIDs := range msgStateUpdates { - values := make([]driver.Value, len(msgIDs)) - for i, msgID := range msgIDs { - bs.data.UpdateMessageStateIfCached(ctx, msgID, msgState, confirmTime) - values[i] = msgID - } - fb := database.MessageQueryFactory.NewFilter(ctx) - filter := fb.In("id", values) - setConfirmed := database.MessageQueryFactory.NewUpdate(ctx). - Set("confirmed", confirmTime). - Set("state", msgState) - if err := bs.database.UpdateMessages(ctx, bs.namespace, filter, setConfirmed); err != nil { + if err := bs.confirmMessages(ctx, msgIDs, msgState, confirmTime, ""); err != nil { return err } } diff --git a/internal/events/aggregator_batch_state_test.go b/internal/events/aggregator_batch_state_test.go index bcf1ecb398..4b8fb7a621 100644 --- a/internal/events/aggregator_batch_state_test.go +++ b/internal/events/aggregator_batch_state_test.go @@ -53,7 +53,7 @@ func TestFlushPinsFailUpdateMessages(t *testing.T) { ag.mdi.On("UpdatePins", ag.ctx, "ns1", mock.Anything, mock.Anything).Return(nil) ag.mdi.On("UpdateMessages", ag.ctx, "ns1", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, msgID, core.MessageStateConfirmed, mock.Anything).Return() + ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, msgID, core.MessageStateConfirmed, mock.Anything, "").Return() bs.markMessageDispatched(fftypes.NewUUID(), &core.Message{ Header: core.MessageHeader{ diff --git a/internal/events/aggregator_test.go b/internal/events/aggregator_test.go index bc3c121a49..2fd95bc796 100644 --- a/internal/events/aggregator_test.go +++ b/internal/events/aggregator_test.go @@ -216,6 +216,7 @@ func TestCacheInitFail(t *testing.T) { _, err := newAggregator(ctx, ns, mdi, mbi, mpm, mdh, mim, mdm, newEventNotifier(ctx, "ut"), mmi, cmi) assert.Equal(t, cacheInitError, err) } + func TestAggregationMaskedZeroNonceMatch(t *testing.T) { ag := newTestAggregatorWithMetrics() @@ -305,7 +306,7 @@ func TestAggregationMaskedZeroNonceMatch(t *testing.T) { // Validate the message is ok ag.mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePins).Return(batch.Payload.Messages[0], core.DataArray{}, true, nil) ag.mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) - ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, mock.Anything, core.MessageStateConfirmed, mock.Anything).Return() + ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, mock.Anything, core.MessageStateConfirmed, mock.Anything, "").Return() // Insert the confirmed event ag.mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *core.Event) bool { return *e.Reference == *msgID && e.Type 
== core.EventTypeMessageConfirmed @@ -316,7 +317,7 @@ func TestAggregationMaskedZeroNonceMatch(t *testing.T) { ag.mdi.On("UpdateMessages", ag.ctx, "ns1", mock.Anything, mock.MatchedBy(func(u ffapi.Update) bool { update, err := u.Finalize() assert.NoError(t, err) - assert.Len(t, update.SetOperations, 2) + assert.Len(t, update.SetOperations, 3) assert.Equal(t, "confirmed", update.SetOperations[0].Field) v, err := update.SetOperations[0].Value.Value() @@ -423,7 +424,7 @@ func TestAggregationMaskedNextSequenceMatch(t *testing.T) { // Validate the message is ok ag.mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePins).Return(batch.Payload.Messages[0], core.DataArray{}, true, nil) ag.mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) - ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, mock.Anything, core.MessageStateConfirmed, mock.Anything).Return() + ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, mock.Anything, core.MessageStateConfirmed, mock.Anything, "").Return() // Insert the confirmed event ag.mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *core.Event) bool { return *e.Reference == *msgID && e.Type == core.EventTypeMessageConfirmed @@ -515,7 +516,7 @@ func TestAggregationBroadcast(t *testing.T) { // Validate the message is ok ag.mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], core.DataArray{}, true, nil) ag.mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) - ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, mock.Anything, core.MessageStateConfirmed, mock.Anything).Return() + ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, mock.Anything, core.MessageStateConfirmed, mock.Anything, "").Return() // Insert the confirmed event ag.mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *core.Event) bool { return *e.Reference == *msgID && e.Type == core.EventTypeMessageConfirmed @@ -602,7 +603,7 @@ func TestAggregationMigratedBroadcast(t *testing.T) { // Validate the message is ok ag.mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], core.DataArray{}, true, nil) ag.mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) - ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, mock.Anything, core.MessageStateConfirmed, mock.Anything).Return() + ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, mock.Anything, core.MessageStateConfirmed, mock.Anything, "").Return() // Insert the confirmed event ag.mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *core.Event) bool { return *e.Reference == *msgID && e.Type == core.EventTypeMessageConfirmed @@ -1711,19 +1712,13 @@ func TestReadyForDispatchApprovalMismatch(t *testing.T) { } -func TestDefinitionBroadcastActionRejectCustomCorrelator(t *testing.T) { +func TestDefinitionBroadcastActionRejectFailUpdate(t *testing.T) { ag := newTestAggregator() defer ag.cleanup(t) bs := newBatchState(&ag.aggregator) org1 := newTestOrg("org1") - customCorrelator := fftypes.NewUUID() - - ag.mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(event *core.Event) bool { - return event.Type == core.EventTypeMessageRejected && event.Correlator.Equals(customCorrelator) - })).Return(nil) - - newState := ag.completeDispatch(core.ActionReject, customCorrelator, &core.Message{ + msg := &core.Message{ Header: core.MessageHeader{ Type: core.MessageTypeDefinition, ID: fftypes.NewUUID(), @@ -1735,10 +1730,22 @@ func 
TestDefinitionBroadcastActionRejectCustomCorrelator(t *testing.T) { Data: core.DataRefs{ {ID: fftypes.NewUUID()}, }, - }, nil, bs) + } + customCorrelator := fftypes.NewUUID() + + ag.mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(event *core.Event) bool { + return event.Type == core.EventTypeMessageRejected && event.Correlator.Equals(customCorrelator) + })).Return(nil) + ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, msg.Header.ID, core.MessageStateRejected, mock.Anything, "reject-reason").Return() + ag.mdi.On("UpdateMessages", ag.ctx, "ns1", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + + newState := ag.completeDispatch(core.ActionReject, customCorrelator, msg, nil, bs) assert.Equal(t, core.MessageStateRejected, newState) + msg.RejectReason = "reject-reason" + + bs.markMessageDispatched(fftypes.NewUUID(), msg, 0, newState) err := bs.RunFinalize(ag.ctx) - assert.NoError(t, err) + assert.EqualError(t, err, "pop") } func TestDispatchBroadcastQueuesLaterDispatch(t *testing.T) { @@ -1877,13 +1884,49 @@ func TestDefinitionBroadcastActionRetry(t *testing.T) { func TestDefinitionBroadcastActionReject(t *testing.T) { ag := newTestAggregator() defer ag.cleanup(t) + bs := newBatchState(&ag.aggregator) - msg1, _, _, _ := newTestManifest(core.MessageTypeDefinition, nil) + msg1, _, org1, manifest := newTestManifest(core.MessageTypeDefinition, nil) + ag.mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything).Return(org1, nil) + ag.mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID, data.CRORequirePublicBlobRefs).Return(msg1, core.DataArray{}, true, nil).Once() + ag.mdi.On("GetPins", ag.ctx, "ns1", mock.Anything).Return([]*core.Pin{}, nil, nil).Once() ag.mdh.On("HandleDefinitionBroadcast", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
Return(definitions.HandlerResult{Action: core.ActionReject}, fmt.Errorf("pop")) - _, _, err := ag.readyForDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &core.Pin{Signer: "0x12345"}) + pin1 := &core.Pin{Masked: false, Sequence: 12345, Signer: msg1.Header.Key} + err := ag.processMessage(ag.ctx, manifest, pin1, 0, manifest.Messages[0], &core.BatchPersisted{}, bs) + assert.NoError(t, err) + + ag.mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *core.Event) bool { + return *e.Reference == *msg1.Header.ID && e.Type == core.EventTypeMessageRejected + })).Return(nil) + ag.mdm.On("UpdateMessageStateIfCached", ag.ctx, mock.Anything, core.MessageStateRejected, mock.Anything, "pop").Return() + ag.mdi.On("UpdateMessages", ag.ctx, "ns1", mock.Anything, mock.MatchedBy(func(u ffapi.Update) bool { + update, err := u.Finalize() + assert.NoError(t, err) + assert.Len(t, update.SetOperations, 3) + + assert.Equal(t, "confirmed", update.SetOperations[0].Field) + v, err := update.SetOperations[0].Value.Value() + assert.NoError(t, err) + assert.Greater(t, v, int64(0)) + + assert.Equal(t, "state", update.SetOperations[1].Field) + v, err = update.SetOperations[1].Value.Value() + assert.NoError(t, err) + assert.Equal(t, "rejected", v) + + assert.Equal(t, "rejectreason", update.SetOperations[2].Field) + v, err = update.SetOperations[2].Value.Value() + assert.NoError(t, err) + assert.Equal(t, "pop", v) + + return true + })).Return(nil) + ag.mdi.On("UpdatePins", ag.ctx, "ns1", mock.Anything, mock.Anything).Return(nil) + + err = bs.RunFinalize(ag.ctx) assert.NoError(t, err) } @@ -1897,8 +1940,8 @@ func TestDefinitionBroadcastRejectSignerLookupWrongOrg(t *testing.T) { ag.mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything).Return(newTestOrg("org2"), nil) action, err := ag.checkOnchainConsistency(ag.ctx, msg1, &core.Pin{Signer: "0x12345"}) - assert.NoError(t, err) assert.Equal(t, core.ActionReject, action) + assert.Regexp(t, "FF10453", err) } @@ -1980,7 +2023,7 @@ func TestReadyForDispatchGroupInit(t *testing.T) { bs := newBatchState(&ag.aggregator) org1 := newTestOrg("org1") - _, _, err := ag.readyForDispatch(ag.ctx, &core.Message{ + action, _, err := ag.readyForDispatch(ag.ctx, &core.Message{ Header: core.MessageHeader{ ID: fftypes.NewUUID(), Type: core.MessageTypeGroupInit, @@ -1988,6 +2031,7 @@ func TestReadyForDispatchGroupInit(t *testing.T) { }, }, nil, nil, bs, &core.Pin{Signer: "0x12345"}) assert.NoError(t, err) + assert.Equal(t, core.ActionConfirm, action) } diff --git a/internal/events/batch_pin_complete.go b/internal/events/batch_pin_complete.go index 2d36cd54fb..7dccbe5cb1 100644 --- a/internal/events/batch_pin_complete.go +++ b/internal/events/batch_pin_complete.go @@ -31,17 +31,19 @@ import ( // // We must block here long enough to get the payload from the sharedstorage, persist the messages in the correct // sequence, and also persist all the data. 
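// Illustrative sketch of the batch-pin refactor that follows: rather than persisting each
// blockchain event inside its own retry/RunAsGroup block, the handler records the event on a
// batch context and defers follow-up work to a post-insert callback, so many events can be
// flushed in one database transaction. Names below are hypothetical stand-ins for the real
// eventBatchContext plumbing.
package eventsketch

import "context"

type chainEvent struct {
	Name  string
	Topic string
}

type eventBatchContext struct {
	toInsert   []*chainEvent
	postInsert []func() error
}

func (bc *eventBatchContext) addEventToInsert(ev *chainEvent) {
	bc.toInsert = append(bc.toInsert, ev)
}

// flush stands in for the single bulk insert plus the deferred follow-up work.
func (bc *eventBatchContext) flush(ctx context.Context, insertAll func(context.Context, []*chainEvent) error) error {
	if len(bc.toInsert) > 0 {
		if err := insertAll(ctx, bc.toInsert); err != nil {
			return err
		}
	}
	for _, fn := range bc.postInsert {
		// post-insert callbacks run only after the whole batch has been written
		if err := fn(); err != nil {
			return err
		}
	}
	return nil
}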
-func (em *eventManager) BatchPinComplete(namespace string, batchPin *blockchain.BatchPin, signingKey *core.VerifierRef) error { +func (em *eventManager) handleBlockchainBatchPinEvent(ctx context.Context, event *blockchain.BatchPinCompleteEvent, bc *eventBatchContext) error { + batchPin := event.Batch + if em.multiparty == nil { - log.L(em.ctx).Errorf("Ignoring batch pin from non-multiparty network!") + log.L(ctx).Errorf("Ignoring batch pin from non-multiparty network!") return nil } if batchPin.TransactionID == nil { - log.L(em.ctx).Errorf("Invalid BatchPin transaction - ID is nil") + log.L(ctx).Errorf("Invalid BatchPin transaction - ID is nil") return nil // move on } - if namespace != em.namespace.Name { - log.L(em.ctx).Debugf("Ignoring batch pin from different namespace '%s'", namespace) + if event.Namespace != em.namespace.Name { + log.L(ctx).Debugf("Ignoring batch pin from different namespace '%s'", event.Namespace) return nil // move on } @@ -49,52 +51,50 @@ func (em *eventManager) BatchPinComplete(namespace string, batchPin *blockchain. batchPin.TransactionType = core.TransactionTypeBatchPin } - log.L(em.ctx).Infof("-> BatchPinComplete batch=%s txn=%s signingIdentity=%s", batchPin.BatchID, batchPin.Event.ProtocolID, signingKey.Value) + log.L(ctx).Infof("-> BatchPinComplete batch=%s txn=%s signingIdentity=%s", batchPin.BatchID, batchPin.Event.ProtocolID, event.SigningKey.Value) defer func() { - log.L(em.ctx).Infof("<- BatchPinComplete batch=%s txn=%s signingIdentity=%s", batchPin.BatchID, batchPin.Event.ProtocolID, signingKey.Value) + log.L(ctx).Infof("<- BatchPinComplete batch=%s txn=%s signingIdentity=%s", batchPin.BatchID, batchPin.Event.ProtocolID, event.SigningKey.Value) }() - log.L(em.ctx).Tracef("BatchPinComplete batch=%s info: %+v", batchPin.BatchID, batchPin.Event.Info) + log.L(ctx).Tracef("BatchPinComplete batch=%s info: %+v", batchPin.BatchID, batchPin.Event.Info) - // Here we simple record all the pins as parked, and emit an event for the aggregator - // to check whether the messages in the batch have been written. 
- return em.retry.Do(em.ctx, "persist batch pins", func(attempt int) (bool, error) { - // We process the batch into the DB as a single transaction (if transactions are supported), both for - // efficiency and to minimize the chance of duplicates (although at-least-once delivery is the core model) - err := em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { - if err := em.persistBatchTransaction(ctx, batchPin); err != nil { - return err - } - chainEvent := buildBlockchainEvent(em.namespace.Name, nil, &batchPin.Event, &core.BlockchainTransactionRef{ - Type: batchPin.TransactionType, - ID: batchPin.TransactionID, - BlockchainID: batchPin.Event.BlockchainTXID, - }) - if err := em.maybePersistBlockchainEvent(ctx, chainEvent, nil); err != nil { - return err - } - em.emitBlockchainEventMetric(&batchPin.Event) - private := batchPin.BatchPayloadRef == "" - if err := em.persistContexts(ctx, batchPin, signingKey, private); err != nil { - return err - } + if err := em.persistBatchTransaction(ctx, batchPin); err != nil { + return err + } + chainEvent := buildBlockchainEvent(em.namespace.Name, nil, &batchPin.Event, &core.BlockchainTransactionRef{ + Type: batchPin.TransactionType, + ID: batchPin.TransactionID, + BlockchainID: batchPin.Event.BlockchainTXID, + }) + // Defer the event insert itself to the end + bc.addEventToInsert(chainEvent, em.getTopicForChainListener(nil)) + bc.postInsert = append(bc.postInsert, func() error { + em.emitBlockchainEventMetric(&batchPin.Event) + return em.postBlockchainBatchPinEventInsert(ctx, event) + }) + return nil +} - batch, _, err := em.aggregator.GetBatchForPin(ctx, &core.Pin{ - Batch: batchPin.BatchID, - BatchHash: batchPin.BatchHash, - }) - if err != nil { - return err - } - // Kick off a download for broadcast batches if the batch isn't already persisted - if !private && batch == nil { - if err := em.sharedDownload.InitiateDownloadBatch(ctx, batchPin.TransactionID, batchPin.BatchPayloadRef); err != nil { - return err - } - } - return nil - }) - return err != nil, err // retry indefinitely (until context closes) +func (em *eventManager) postBlockchainBatchPinEventInsert(ctx context.Context, event *blockchain.BatchPinCompleteEvent) error { + batchPin := event.Batch + private := batchPin.BatchPayloadRef == "" + if err := em.persistContexts(ctx, batchPin, event.SigningKey, private); err != nil { + return err + } + + batch, _, err := em.aggregator.GetBatchForPin(ctx, &core.Pin{ + Batch: batchPin.BatchID, + BatchHash: batchPin.BatchHash, }) + if err != nil { + return err + } + // Kick off a download for broadcast batches if the batch isn't already persisted + if !private && batch == nil { + if err := em.sharedDownload.InitiateDownloadBatch(ctx, batchPin.TransactionID, batchPin.BatchPayloadRef, false /* batch processing does not currently use idempotency keys */); err != nil { + return err + } + } + return nil } func (em *eventManager) persistBatchTransaction(ctx context.Context, batchPin *blockchain.BatchPin) error { diff --git a/internal/events/batch_pin_complete_test.go b/internal/events/batch_pin_complete_test.go index 5fb4a6b22f..fc78d63ac8 100644 --- a/internal/events/batch_pin_complete_test.go +++ b/internal/events/batch_pin_complete_test.go @@ -118,22 +118,28 @@ func TestBatchPinCompleteOkBroadcast(t *testing.T) { } } - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, mock.MatchedBy(func(e *core.BlockchainEvent) bool { - return e.Name == batchPin.Event.Name - })).Return(nil, fmt.Errorf("pop")).Once() - em.mth.On("InsertOrGetBlockchainEvent", 
mock.Anything, mock.MatchedBy(func(e *core.BlockchainEvent) bool { - return e.Name == batchPin.Event.Name - })).Return(nil, nil).Once() + em.mth.On("InsertNewBlockchainEvents", mock.Anything, mock.MatchedBy(func(e []*core.BlockchainEvent) bool { + return e[0].Name == batchPin.Event.Name + })).Return([]*core.BlockchainEvent{{ID: fftypes.NewUUID()}}, nil).Once() em.mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(e *core.Event) bool { return e.Type == core.EventTypeBlockchainEventReceived })).Return(nil).Once() em.mdi.On("InsertPins", mock.Anything, mock.Anything).Return(nil).Once() em.mdi.On("GetBatchByID", mock.Anything, "ns1", mock.Anything).Return(nil, nil) - em.msd.On("InitiateDownloadBatch", mock.Anything, batchPin.TransactionID, batchPin.BatchPayloadRef).Return(nil) - - err := em.BatchPinComplete("ns1", batchPin, &core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "0x12345", + em.msd.On("InitiateDownloadBatch", mock.Anything, batchPin.TransactionID, batchPin.BatchPayloadRef, false).Return(nil) + + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: "ns1", + Batch: batchPin, + SigningKey: &core.VerifierRef{ + Type: core.VerifierTypeEthAddress, + Value: "0x12345", + }, + }, + }, }) assert.NoError(t, err) @@ -182,21 +188,27 @@ func TestBatchPinCompleteOkBroadcastExistingBatch(t *testing.T) { } } - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, mock.MatchedBy(func(e *core.BlockchainEvent) bool { - return e.Name == batchPin.Event.Name - })).Return(nil, fmt.Errorf("pop")).Once() - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, mock.MatchedBy(func(e *core.BlockchainEvent) bool { - return e.Name == batchPin.Event.Name - })).Return(nil, nil).Once() + em.mth.On("InsertNewBlockchainEvents", mock.Anything, mock.MatchedBy(func(e []*core.BlockchainEvent) bool { + return e[0].Name == batchPin.Event.Name + })).Return([]*core.BlockchainEvent{{ID: fftypes.NewUUID()}}, nil).Once() em.mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(e *core.Event) bool { return e.Type == core.EventTypeBlockchainEventReceived })).Return(nil).Once() em.mdi.On("InsertPins", mock.Anything, mock.Anything).Return(nil).Once() em.mdi.On("GetBatchByID", mock.Anything, "ns1", mock.Anything).Return(batchPersisted, nil) - err := em.BatchPinComplete("ns1", batchPin, &core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "0x12345", + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: "ns1", + Batch: batchPin, + SigningKey: &core.VerifierRef{ + Type: core.VerifierTypeEthAddress, + Value: "0x12345", + }, + }, + }, }) assert.NoError(t, err) @@ -221,13 +233,22 @@ func TestBatchPinCompleteOkPrivate(t *testing.T) { em.mdi.On("RunAsGroup", mock.Anything, mock.Anything).Return(nil) em.mdi.On("InsertPins", mock.Anything, mock.Anything).Return(fmt.Errorf("These pins have been seen before")) // simulate replay fallback em.mdi.On("UpsertPin", mock.Anything, mock.Anything).Return(nil) - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, mock.Anything).Return(nil, nil) + em.mth.On("InsertNewBlockchainEvents", mock.Anything, mock.Anything).Return([]*core.BlockchainEvent{{ID: fftypes.NewUUID()}}, nil) em.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) em.mdi.On("GetBatchByID", mock.Anything, "ns1", mock.Anything).Return(nil, 
nil) - err := em.BatchPinComplete("ns1", batchPin, &core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "0xffffeeee", + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: "ns1", + Batch: batchPin, + SigningKey: &core.VerifierRef{ + Type: core.VerifierTypeEthAddress, + Value: "0xffffeeee", + }, + }, + }, }) assert.NoError(t, err) @@ -259,12 +280,23 @@ func TestBatchPinCompleteInsertPinsFail(t *testing.T) { em.mdi.On("RunAsGroup", mock.Anything, mock.Anything).Return(nil) em.mdi.On("InsertPins", mock.Anything, mock.Anything).Return(fmt.Errorf("optimization miss")) em.mdi.On("UpsertPin", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, mock.Anything).Return(nil, nil) + em.mth.On("InsertNewBlockchainEvents", mock.Anything, mock.MatchedBy(func(e []*core.BlockchainEvent) bool { + return e[0].Name == batchPin.Event.Name + })).Return([]*core.BlockchainEvent{{ID: fftypes.NewUUID()}}, nil).Once() em.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) - err := em.BatchPinComplete("ns1", batchPin, &core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "0xffffeeee", + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: "ns1", + Batch: batchPin, + SigningKey: &core.VerifierRef{ + Type: core.VerifierTypeEthAddress, + Value: "0xffffeeee", + }, + }, + }, }) assert.Regexp(t, "FF00154", err) @@ -289,13 +321,24 @@ func TestBatchPinCompleteGetBatchByIDFails(t *testing.T) { em.mdi.On("RunAsGroup", mock.Anything, mock.Anything).Return(nil) em.mdi.On("InsertPins", mock.Anything, mock.Anything).Return(nil) - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, mock.Anything).Return(nil, nil) + em.mth.On("InsertNewBlockchainEvents", mock.Anything, mock.MatchedBy(func(e []*core.BlockchainEvent) bool { + return e[0].Name == batchPin.Event.Name + })).Return([]*core.BlockchainEvent{{ID: fftypes.NewUUID()}}, nil).Once() em.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) em.mdi.On("GetBatchByID", mock.Anything, "ns1", mock.Anything).Return(nil, fmt.Errorf("batch lookup failed")) - err := em.BatchPinComplete("ns1", batchPin, &core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "0xffffeeee", + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: "ns1", + Batch: batchPin, + SigningKey: &core.VerifierRef{ + Type: core.VerifierTypeEthAddress, + Value: "0xffffeeee", + }, + }, + }, }) assert.Regexp(t, "FF00154", err) @@ -320,15 +363,26 @@ func TestSequencedBroadcastInitiateDownloadFail(t *testing.T) { em.mth.On("PersistTransaction", mock.Anything, batchPin.TransactionID, core.TransactionTypeBatchPin, "0x12345").Return(true, nil) - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, mock.Anything).Return(nil, nil) + em.mth.On("InsertNewBlockchainEvents", mock.Anything, mock.MatchedBy(func(e []*core.BlockchainEvent) bool { + return e[0].Name == batchPin.Event.Name + })).Return([]*core.BlockchainEvent{{ID: fftypes.NewUUID()}}, nil).Once() em.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) em.mdi.On("InsertPins", mock.Anything, mock.Anything).Return(nil) em.mdi.On("GetBatchByID", mock.Anything, "ns1", 
mock.Anything).Return(nil, nil) - em.msd.On("InitiateDownloadBatch", mock.Anything, batchPin.TransactionID, batchPin.BatchPayloadRef).Return(fmt.Errorf("pop")) - - err := em.BatchPinComplete("ns1", batchPin, &core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "0xffffeeee", + em.msd.On("InitiateDownloadBatch", mock.Anything, batchPin.TransactionID, batchPin.BatchPayloadRef, false).Return(fmt.Errorf("pop")) + + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: "ns1", + Batch: batchPin, + SigningKey: &core.VerifierRef{ + Type: core.VerifierTypeEthAddress, + Value: "0xffffeeee", + }, + }, + }, }) assert.Regexp(t, "FF00154", err) } @@ -339,9 +393,18 @@ func TestBatchPinCompleteNoTX(t *testing.T) { batch := &blockchain.BatchPin{} - err := em.BatchPinComplete("ns1", batch, &core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "0x12345", + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: "ns1", + Batch: batch, + SigningKey: &core.VerifierRef{ + Type: core.VerifierTypeEthAddress, + Value: "0x12345", + }, + }, + }, }) assert.NoError(t, err) } @@ -357,9 +420,18 @@ func TestBatchPinCompleteWrongNamespace(t *testing.T) { }, } - err := em.BatchPinComplete("ns2", batch, &core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "0x12345", + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: "ns2", + Batch: batch, + SigningKey: &core.VerifierRef{ + Type: core.VerifierTypeEthAddress, + Value: "0xffffeeee", + }, + }, + }, }) assert.NoError(t, err) } @@ -376,9 +448,18 @@ func TestBatchPinCompleteNonMultiparty(t *testing.T) { }, } - err := em.BatchPinComplete("ns1", batch, &core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "0x12345", + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeBatchPinComplete, + BatchPinComplete: &blockchain.BatchPinCompleteEvent{ + Namespace: "ns1", + Batch: batch, + SigningKey: &core.VerifierRef{ + Type: core.VerifierTypeEthAddress, + Value: "0x12345", + }, + }, + }, }) assert.NoError(t, err) } @@ -688,7 +769,7 @@ func TestPersistBatchDataWithPublicInitiateDownload(t *testing.T) { em.mdi.On("GetBlobs", mock.Anything, mock.Anything, mock.Anything).Return([]*core.Blob{}, nil, nil) - em.msd.On("InitiateDownloadBlob", mock.Anything, batch.Payload.TX.ID, data.ID, "ref1").Return(nil) + em.msd.On("InitiateDownloadBlob", mock.Anything, batch.Payload.TX.ID, data.ID, "ref1", false).Return(nil) valid, err := em.checkAndInitiateBlobDownloads(context.Background(), batch, 0, data) assert.Nil(t, err) @@ -713,7 +794,7 @@ func TestPersistBatchDataWithPublicInitiateDownloadFail(t *testing.T) { em.mdi.On("GetBlobs", mock.Anything, mock.Anything, mock.Anything).Return([]*core.Blob{}, nil, nil) - em.msd.On("InitiateDownloadBlob", mock.Anything, batch.Payload.TX.ID, data.ID, "ref1").Return(fmt.Errorf("pop")) + em.msd.On("InitiateDownloadBlob", mock.Anything, batch.Payload.TX.ID, data.ID, "ref1", false).Return(fmt.Errorf("pop")) valid, err := em.checkAndInitiateBlobDownloads(context.Background(), batch, 0, data) assert.Regexp(t, "pop", err) diff --git a/internal/events/blockchain_event.go b/internal/events/blockchain_event.go index 
989bcb49ba..e4541bbf0f 100644 --- a/internal/events/blockchain_event.go +++ b/internal/events/blockchain_event.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -26,6 +26,18 @@ import ( "github.com/hyperledger/firefly/pkg/core" ) +type eventBatchContext struct { + contractListenerResults map[string]*core.ContractListener + topicsByEventID map[string]string + chainEventsToInsert []*core.BlockchainEvent + postInsert []func() error +} + +func (bc *eventBatchContext) addEventToInsert(event *core.BlockchainEvent, topic string) { + bc.chainEventsToInsert = append(bc.chainEventsToInsert, event) + bc.topicsByEventID[event.ID.String()] = topic +} + func buildBlockchainEvent(ns string, subID *fftypes.UUID, event *blockchain.Event, tx *core.BlockchainTransactionRef) *core.BlockchainEvent { ev := &core.BlockchainEvent{ ID: fftypes.NewUUID(), @@ -44,10 +56,35 @@ func buildBlockchainEvent(ns string, subID *fftypes.UUID, event *blockchain.Even return ev } -func (em *eventManager) getChainListenerByProtocolIDCached(ctx context.Context, protocolID string) (*core.ContractListener, error) { - return em.getChainListenerCached(fmt.Sprintf("pid:%s", protocolID), func() (*core.ContractListener, error) { +func (em *eventManager) getChainListenerByProtocolIDCached(ctx context.Context, protocolID string, bc *eventBatchContext) (*core.ContractListener, error) { + // Even a negative result is cached in the scope of the event batch (so we don't spam the DB hundreds of times in one tight loop to get not-found) + if l, batchResult := bc.contractListenerResults[protocolID]; batchResult { + return l, nil + } + l, err := em.getChainListenerCached(fmt.Sprintf("pid:%s", protocolID), func() (*core.ContractListener, error) { return em.database.GetContractListenerByBackendID(ctx, em.namespace.Name, protocolID) }) + if err != nil { + return nil, err + } + bc.contractListenerResults[protocolID] = l // includes nil results + return l, nil +} + +func (em *eventManager) maybePersistBlockchainEvent(ctx context.Context, chainEvent *core.BlockchainEvent, listener *core.ContractListener) error { + existing, err := em.txHelper.InsertOrGetBlockchainEvent(ctx, chainEvent) + if err != nil { + return err + } + if existing != nil { + log.L(ctx).Debugf("Ignoring duplicate blockchain event %s", chainEvent.ProtocolID) + // Return the ID of the existing event + chainEvent.ID = existing.ID + return nil + } + topic := em.getTopicForChainListener(listener) + ffEvent := core.NewEvent(core.EventTypeBlockchainEventReceived, chainEvent.Namespace, chainEvent.ID, chainEvent.TX.ID, topic) + return em.database.InsertEvent(ctx, ffEvent) } func (em *eventManager) getChainListenerCached(cacheKey string, getter func() (*core.ContractListener, error)) (*core.ContractListener, error) { @@ -76,19 +113,19 @@ func (em *eventManager) getTopicForChainListener(listener *core.ContractListener return topic } -func (em *eventManager) maybePersistBlockchainEvent(ctx context.Context, chainEvent *core.BlockchainEvent, listener *core.ContractListener) error { - if existing, err := em.txHelper.InsertOrGetBlockchainEvent(ctx, chainEvent); err != nil { +func (em *eventManager) maybePersistBlockchainEvents(ctx context.Context, bc *eventBatchContext) error { + // Attempt to insert all the events in one go, using efficient InsertMany semantics + inserted, err := em.txHelper.InsertNewBlockchainEvents(ctx, bc.chainEventsToInsert) + if err != nil { return err - } else if existing != nil { -
log.L(ctx).Debugf("Ignoring duplicate blockchain event %s", chainEvent.ProtocolID) - // Return the ID of the existing event - chainEvent.ID = existing.ID - return nil } - topic := em.getTopicForChainListener(listener) - ffEvent := core.NewEvent(core.EventTypeBlockchainEventReceived, chainEvent.Namespace, chainEvent.ID, chainEvent.TX.ID, topic) - if err := em.database.InsertEvent(ctx, ffEvent); err != nil { - return err + // Only the ones newly inserted need events emitting + for _, chainEvent := range inserted { + topic := bc.topicsByEventID[chainEvent.ID.String()] // bc.addEvent() ensures this is there + ffEvent := core.NewEvent(core.EventTypeBlockchainEventReceived, chainEvent.Namespace, chainEvent.ID, chainEvent.TX.ID, topic) + if err := em.database.InsertEvent(ctx, ffEvent); err != nil { + return err + } } return nil } @@ -99,32 +136,66 @@ func (em *eventManager) emitBlockchainEventMetric(event *blockchain.Event) { } } -func (em *eventManager) BlockchainEvent(event *blockchain.EventWithSubscription) error { +func (em *eventManager) BlockchainEventBatch(batch []*blockchain.EventToDispatch) error { return em.retry.Do(em.ctx, "persist blockchain event", func(attempt int) (bool, error) { - err := em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { - listener, err := em.getChainListenerByProtocolIDCached(ctx, event.Subscription) - if err != nil { - return err + bc := &eventBatchContext{ + contractListenerResults: make(map[string]*core.ContractListener), + topicsByEventID: make(map[string]string), + } + return true, em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { + // Process the events, generating the optimized list of event inserts + for _, event := range batch { + switch event.Type { + case blockchain.EventTypeForListener: + if err := em.handleBlockchainEventForListener(ctx, event.ForListener, bc); err != nil { + return err + } + case blockchain.EventTypeBatchPinComplete: + if err := em.handleBlockchainBatchPinEvent(ctx, event.BatchPinComplete, bc); err != nil { + return err + } + case blockchain.EventTypeNetworkAction: + if err := em.handleBlockchainNetworkAction(ctx, event.NetworkAction, bc); err != nil { + return err + } + } } - if listener == nil { - log.L(ctx).Warnf("Event received from unknown subscription %s", event.Subscription) - return nil // no retry + // Do the optimized inserts + if len(bc.chainEventsToInsert) > 0 { + if err := em.maybePersistBlockchainEvents(ctx, bc); err != nil { + return err + } } - if listener.Namespace != em.namespace.Name { - log.L(em.ctx).Debugf("Ignoring blockchain event from different namespace '%s'", listener.Namespace) - return nil + // Batch pins require processing after the event is inserted + for _, postEvent := range bc.postInsert { + if err := postEvent(); err != nil { + return err + } } - listener.Namespace = em.namespace.Name - - chainEvent := buildBlockchainEvent(listener.Namespace, listener.ID, &event.Event, &core.BlockchainTransactionRef{ - BlockchainID: event.BlockchainTXID, - }) - if err := em.maybePersistBlockchainEvent(ctx, chainEvent, listener); err != nil { - return err - } - em.emitBlockchainEventMetric(&event.Event) return nil }) - return err != nil, err }) } + +func (em *eventManager) handleBlockchainEventForListener(ctx context.Context, event *blockchain.EventForListener, bc *eventBatchContext) error { + listener, err := em.getChainListenerByProtocolIDCached(ctx, event.ListenerID, bc) + if err != nil { + return err + } + if listener == nil { + log.L(ctx).Warnf("Event received from unknown 
subscription %s", event.ListenerID) + return nil // no retry + } + if listener.Namespace != em.namespace.Name { + log.L(ctx).Debugf("Ignoring blockchain event from different namespace '%s'", listener.Namespace) + return nil + } + listener.Namespace = em.namespace.Name + + chainEvent := buildBlockchainEvent(listener.Namespace, listener.ID, event.Event, &core.BlockchainTransactionRef{ + BlockchainID: event.BlockchainTXID, + }) + bc.addEventToInsert(chainEvent, em.getTopicForChainListener(listener)) + em.emitBlockchainEventMetric(event.Event) + return nil +} diff --git a/internal/events/blockchain_event_test.go b/internal/events/blockchain_event_test.go index eb8352a9ce..987c2d8a56 100644 --- a/internal/events/blockchain_event_test.go +++ b/internal/events/blockchain_event_test.go @@ -31,9 +31,9 @@ func TestContractEventWithRetries(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) - ev := &blockchain.EventWithSubscription{ - Subscription: "sb-1", - Event: blockchain.Event{ + ev := &blockchain.EventForListener{ + ListenerID: "sb-1", + Event: &blockchain.Event{ BlockchainTXID: "0xabcd1234", ProtocolID: "10/20/30", Name: "Changed", @@ -52,19 +52,32 @@ func TestContractEventWithRetries(t *testing.T) { } var eventID *fftypes.UUID - em.mdi.On("GetContractListenerByBackendID", mock.Anything, "ns1", "sb-1").Return(nil, fmt.Errorf("pop")).Once() + em.mdi.On("GetContractListenerByBackendID", mock.Anything, "ns1", "sb-1").Return(nil, fmt.Errorf("snap")).Once() em.mdi.On("GetContractListenerByBackendID", mock.Anything, "ns1", "sb-1").Return(sub, nil).Times(1) // cached - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")).Once() - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, mock.MatchedBy(func(e *core.BlockchainEvent) bool { + em.mth.On("InsertNewBlockchainEvents", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("crackle")).Once() + mInsert := em.mth.On("InsertNewBlockchainEvents", mock.Anything, mock.MatchedBy(func(events []*core.BlockchainEvent) bool { + if len(events) != 1 { + return false + } + e := events[0] eventID = e.ID return *e.Listener == *sub.ID && e.Name == "Changed" && e.Namespace == "ns1" - })).Return(nil, nil).Times(2) + })).Times(2) + mInsert.Run(func(args mock.Arguments) { + // Mock return for all-new events + mInsert.Return(args[1].([]*core.BlockchainEvent), nil) + }) em.mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")).Once() em.mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(e *core.Event) bool { - return e.Type == core.EventTypeBlockchainEventReceived && e.Reference != nil && e.Reference == eventID && e.Topic == "topic1" + return e.Type == core.EventTypeBlockchainEventReceived && e.Reference != nil && e.Reference.Equals(eventID) && e.Topic == "topic1" })).Return(nil).Once() - err := em.BlockchainEvent(ev) + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeForListener, + ForListener: ev, + }, + }) assert.NoError(t, err) } @@ -73,9 +86,9 @@ func TestContractEventUnknownSubscription(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) - ev := &blockchain.EventWithSubscription{ - Subscription: "sb-1", - Event: blockchain.Event{ + ev := &blockchain.EventForListener{ + ListenerID: "sb-1", + Event: &blockchain.Event{ BlockchainTXID: "0xabcd1234", Name: "Changed", Output: fftypes.JSONObject{ @@ -89,7 +102,12 @@ func TestContractEventUnknownSubscription(t *testing.T) { em.mdi.On("GetContractListenerByBackendID", 
mock.Anything, "ns1", "sb-1").Return(nil, nil) - err := em.BlockchainEvent(ev) + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeForListener, + ForListener: ev, + }, + }) assert.NoError(t, err) } @@ -98,9 +116,9 @@ func TestContractEventWrongNS(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) - ev := &blockchain.EventWithSubscription{ - Subscription: "sb-1", - Event: blockchain.Event{ + ev := &blockchain.EventForListener{ + ListenerID: "sb-1", + Event: &blockchain.Event{ BlockchainTXID: "0xabcd1234", Name: "Changed", Output: fftypes.JSONObject{ @@ -119,7 +137,12 @@ func TestContractEventWrongNS(t *testing.T) { em.mdi.On("GetContractListenerByBackendID", mock.Anything, "ns1", "sb-1").Return(sub, nil) - err := em.BlockchainEvent(ev) + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeForListener, + ForListener: ev, + }, + }) assert.NoError(t, err) } @@ -143,7 +166,8 @@ func TestPersistBlockchainEventDuplicate(t *testing.T) { } existingID := fftypes.NewUUID() - em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, ev).Return(&core.BlockchainEvent{ID: existingID}, nil) + em.mth.On("InsertOrGetBlockchainEvent", mock.Anything, ev). + Return(&core.BlockchainEvent{ID: existingID}, nil) err := em.maybePersistBlockchainEvent(em.ctx, ev, nil) assert.NoError(t, err) diff --git a/internal/events/event_dispatcher.go b/internal/events/event_dispatcher.go index e05db76e53..a862d2dd57 100644 --- a/internal/events/event_dispatcher.go +++ b/internal/events/event_dispatcher.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -20,6 +20,7 @@ import ( "context" "fmt" "sync" + "time" "github.com/hyperledger/firefly-common/pkg/config" "github.com/hyperledger/firefly-common/pkg/ffapi" @@ -39,7 +40,8 @@ import ( ) const ( - maxReadAhead = 65536 + maxReadAhead = 65536 + defaultBatchTimeout = time.Duration(2) * time.Second ) type ackNack struct { @@ -67,6 +69,8 @@ type eventDispatcher struct { mux sync.Mutex namespace string readAhead int + batch bool + batchTimeout time.Duration subscription *subscription txHelper txcommon.Helper } @@ -80,6 +84,15 @@ func newEventDispatcher(ctx context.Context, enricher *eventEnricher, ei events. if readAhead > maxReadAhead { readAhead = maxReadAhead } + + batchTimeout := defaultBatchTimeout + if sub.definition.Options.BatchTimeout != nil && *sub.definition.Options.BatchTimeout != "" { + batchTimeout = fftypes.ParseToDuration(*sub.definition.Options.BatchTimeout) + } + batch := false + if sub.definition.Options.Batch != nil { + batch = *sub.definition.Options.Batch + } ed := &eventDispatcher{ ctx: log.WithLogField(log.WithLogField(ctx, "role", fmt.Sprintf("ed[%s]", connID)), @@ -100,6 +113,8 @@ func newEventDispatcher(ctx context.Context, enricher *eventEnricher, ei events. 
acksNacks: make(chan ackNack), closed: make(chan struct{}), txHelper: txHelper, + batch: batch, + batchTimeout: batchTimeout, } pollerConf := &eventPollerConf{ @@ -146,10 +161,15 @@ func (ed *eventDispatcher) electAndStart() { l.Debugf("Closed before we became leader") return } - // We're ready to go - not + // We're ready to go ed.elected = true ed.eventPoller.start() - go ed.deliverEvents() + + if ed.batch { + go ed.deliverBatchedEvents() + } else { + go ed.deliverEvents() + } // Wait until the event poller closes <-ed.eventPoller.closed } @@ -284,22 +304,22 @@ func (ed *eventDispatcher) bufferedDelivery(events []core.LocallySequenced) (boo // or a reset event happens for { ed.mux.Lock() - var disapatchable []*core.EventDelivery + var dispatchable []*core.EventDelivery inflightCount := len(ed.inflight) maxDispatch := 1 + ed.readAhead - inflightCount if maxDispatch >= len(matching) { - disapatchable = matching + dispatchable = matching matching = nil } else if maxDispatch > 0 { - disapatchable = matching[0:maxDispatch] + dispatchable = matching[0:maxDispatch] matching = matching[maxDispatch:] } ed.mux.Unlock() l.Debugf("Dispatcher event state: readahead=%d candidates=%d matched=%d inflight=%d queued=%d dispatched=%d dispatchable=%d lastAck=%d nacks=%d highest=%d", - ed.readAhead, len(candidates), matchCount, inflightCount, len(matching), dispatched, len(disapatchable), lastAck, nacks, highestOffset) + ed.readAhead, len(candidates), matchCount, inflightCount, len(matching), dispatched, len(dispatchable), lastAck, nacks, highestOffset) - for _, event := range disapatchable { + for _, event := range dispatchable { ed.mux.Lock() ed.inflight[*event.ID] = &event.Event inflightCount = len(ed.inflight) @@ -364,6 +384,66 @@ func (ed *eventDispatcher) handleAckOffsetUpdate(ack ackNack) { } } +func (ed *eventDispatcher) deliverBatchedEvents() { + withData := ed.subscription.definition.Options.WithData != nil && *ed.subscription.definition.Options.WithData + + var events []*core.CombinedEventDataDelivery + var batchTimeoutContext context.Context + var batchTimeoutCancel func() + for { + var timeoutContext context.Context + var timedOut bool + if batchTimeoutContext != nil { + timeoutContext = batchTimeoutContext + } else { + timeoutContext = ed.ctx + } + select { + case event, ok := <-ed.eventDelivery: + if !ok { + if batchTimeoutCancel != nil { + batchTimeoutCancel() + } + return + } + + if events == nil { + events = []*core.CombinedEventDataDelivery{} + batchTimeoutContext, batchTimeoutCancel = context.WithTimeout(ed.ctx, ed.batchTimeout) + } + + log.L(ed.ctx).Debugf("Dispatching %s event in a batch: %.10d/%s [%s]: ref=%s/%s", ed.transport.Name(), event.Sequence, event.ID, event.Type, event.Namespace, event.Reference) + + var data []*core.Data + var err error + if withData && event.Message != nil { + data, _, err = ed.data.GetMessageDataCached(ed.ctx, event.Message) + } + + events = append(events, &core.CombinedEventDataDelivery{Event: event, Data: data}) + + if err != nil { + ed.deliveryResponse(&core.EventDeliveryResponse{ID: event.ID, Rejected: true}) + } + + case <-timeoutContext.Done(): + timedOut = true + case <-ed.ctx.Done(): + if batchTimeoutCancel != nil { + batchTimeoutCancel() + } + return + } + + if len(events) == ed.readAhead || (timedOut && len(events) > 0) { + _ = ed.transport.BatchDeliveryRequest(ed.ctx, ed.connID, ed.subscription.definition, events) + // If err handle all the delivery responses for all the events?? 
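// The deliverBatchedEvents loop above gathers events into a slice, starting a
// timeout when the first event of a batch arrives, and flushes either when
// readAhead events have been collected or when that timeout fires. Below is a
// minimal, self-contained sketch of the same accumulate-or-timeout loop;
// dispatchBatches, the string items and the flush callback are illustrative
// stand-ins, not the FireFly types used in the patch.

package main

import (
	"context"
	"fmt"
	"time"
)

// dispatchBatches reads items from in and flushes a batch when it reaches
// maxSize, or when a timeout started at the first item of the batch expires.
func dispatchBatches(ctx context.Context, in <-chan string, maxSize int, timeout time.Duration, flush func([]string)) {
	var batch []string
	var batchCtx context.Context
	var cancel context.CancelFunc
	for {
		waitCtx := ctx
		if batchCtx != nil {
			waitCtx = batchCtx
		}
		timedOut := false
		select {
		case item, ok := <-in:
			if !ok {
				if cancel != nil {
					cancel()
				}
				return
			}
			if batch == nil {
				// Start the batch timeout when the first item arrives
				batchCtx, cancel = context.WithTimeout(ctx, timeout)
			}
			batch = append(batch, item)
		case <-waitCtx.Done():
			if ctx.Err() != nil {
				if cancel != nil {
					cancel()
				}
				return // parent context closed
			}
			timedOut = true
		}
		if len(batch) >= maxSize || (timedOut && len(batch) > 0) {
			flush(batch)
			if cancel != nil {
				cancel()
			}
			batch, batchCtx, cancel = nil, nil, nil
		}
	}
}

func main() {
	in := make(chan string)
	go func() {
		for i := 0; i < 5; i++ {
			in <- fmt.Sprintf("event-%d", i)
		}
		time.Sleep(500 * time.Millisecond) // give the tail batch time to flush on timeout
		close(in)
	}()
	dispatchBatches(context.Background(), in, 3, 200*time.Millisecond, func(b []string) {
		fmt.Println("flushing batch:", b)
	})
}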
+ events = nil + } + } +} + +// TODO issue here, we can't just call DeliveryRequest with one thing. func (ed *eventDispatcher) deliverEvents() { withData := ed.subscription.definition.Options.WithData != nil && *ed.subscription.definition.Options.WithData for { @@ -372,14 +452,16 @@ func (ed *eventDispatcher) deliverEvents() { if !ok { return } + log.L(ed.ctx).Debugf("Dispatching %s event: %.10d/%s [%s]: ref=%s/%s", ed.transport.Name(), event.Sequence, event.ID, event.Type, event.Namespace, event.Reference) var data []*core.Data var err error if withData && event.Message != nil { data, _, err = ed.data.GetMessageDataCached(ed.ctx, event.Message) } + if err == nil { - err = ed.transport.DeliveryRequest(ed.connID, ed.subscription.definition, event, data) + err = ed.transport.DeliveryRequest(ed.ctx, ed.connID, ed.subscription.definition, event, data) } if err != nil { ed.deliveryResponse(&core.EventDeliveryResponse{ID: event.ID, Rejected: true}) diff --git a/internal/events/event_dispatcher_test.go b/internal/events/event_dispatcher_test.go index 4b557e1084..2f592c92db 100644 --- a/internal/events/event_dispatcher_test.go +++ b/internal/events/event_dispatcher_test.go @@ -97,6 +97,40 @@ func TestEventDispatcherStartStop(t *testing.T) { ed.close() } +func TestEventDispatcherStartStopBatched(t *testing.T) { + ten := uint16(10) + oldest := core.SubOptsFirstEventOldest + truthy := true + ed, cancel := newTestEventDispatcher(&subscription{ + dispatcherElection: make(chan bool, 1), + definition: &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{Namespace: "ns1", Name: "sub1"}, + Ephemeral: true, + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + ReadAhead: &ten, + FirstEvent: &oldest, + Batch: &truthy, + }, + }, + }, + }) + defer cancel() + mdi := ed.database.(*databasemocks.Plugin) + ge := mdi.On("GetEvents", mock.Anything, mock.Anything, mock.Anything).Return([]*core.Event{}, nil, fmt.Errorf("context closed")) + confirmedElected := make(chan bool) + ge.RunFn = func(a mock.Arguments) { + <-confirmedElected + } + + assert.Equal(t, int(10), ed.readAhead) + ed.start() + confirmedElected <- true + close(confirmedElected) + ed.eventPoller.eventNotifier.newEvents <- 12345 + ed.close() +} + func TestMaxReadAhead(t *testing.T) { config.Set(coreconfig.SubscriptionDefaultsReadAhead, 65537) ed, cancel := newTestEventDispatcher(&subscription{ @@ -177,9 +211,9 @@ func TestEventDispatcherReadAheadOutOfOrderAcks(t *testing.T) { mdm := ed.data.(*datamocks.Manager) eventDeliveries := make(chan *core.EventDelivery) - deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + deliveryRequestMock := mei.On("DeliveryRequest", ed.ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) deliveryRequestMock.RunFn = func(a mock.Arguments) { - eventDeliveries <- a.Get(2).(*core.EventDelivery) + eventDeliveries <- a.Get(3).(*core.EventDelivery) } // Setup the IDs @@ -272,9 +306,9 @@ func TestEventDispatcherNoReadAheadInOrder(t *testing.T) { mei := ed.transport.(*eventsmocks.Plugin) eventDeliveries := make(chan *core.EventDelivery) - deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) deliveryRequestMock.RunFn = func(a mock.Arguments) { - eventDeliveries <- 
a.Get(2).(*core.EventDelivery) + eventDeliveries <- a.Get(3).(*core.EventDelivery) } // Setup the IDs @@ -603,9 +637,9 @@ func TestEnrichTransactionEvents(t *testing.T) { mei := ed.transport.(*eventsmocks.Plugin) eventDeliveries := make(chan *core.EventDelivery) - deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) deliveryRequestMock.RunFn = func(a mock.Arguments) { - eventDeliveries <- a.Get(2).(*core.EventDelivery) + eventDeliveries <- a.Get(3).(*core.EventDelivery) } // Setup the IDs @@ -692,9 +726,9 @@ func TestEnrichBlockchainEventEvents(t *testing.T) { mei := ed.transport.(*eventsmocks.Plugin) eventDeliveries := make(chan *core.EventDelivery) - deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) deliveryRequestMock.RunFn = func(a mock.Arguments) { - eventDeliveries <- a.Get(2).(*core.EventDelivery) + eventDeliveries <- a.Get(3).(*core.EventDelivery) } // Setup the IDs @@ -814,7 +848,7 @@ func TestBufferedDeliveryClosedContext(t *testing.T) { mdi := ed.database.(*databasemocks.Plugin) mei := ed.transport.(*eventsmocks.Plugin) mdi.On("GetDataRefs", mock.Anything, mock.Anything).Return(nil, nil, nil) - mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) repoll, err := ed.bufferedDelivery([]core.LocallySequenced{&core.Event{ID: fftypes.NewUUID()}}) assert.False(t, repoll) @@ -837,7 +871,7 @@ func TestBufferedDeliveryNackRewind(t *testing.T) { mdi.On("UpdateOffset", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) delivered := make(chan struct{}) - deliver := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + deliver := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) deliver.RunFn = func(a mock.Arguments) { close(delivered) } @@ -882,7 +916,7 @@ func TestBufferedDeliveryFailNack(t *testing.T) { mdi.On("UpdateOffset", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) failNacked := make(chan bool) - deliver := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + deliver := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) deliver.RunFn = func(a mock.Arguments) { failNacked <- true } @@ -932,6 +966,21 @@ func TestEventDeliveryClosed(t *testing.T) { cancel() } +func TestBatchEventDeliveryClosed(t *testing.T) { + + sub := &subscription{ + definition: &core.Subscription{}, + } + ed, cancel := newTestEventDispatcher(sub) + defer cancel() + + ed.batchTimeout = 1 * time.Minute + ed.eventDelivery <- &core.EventDelivery{} + close(ed.eventDelivery) + + ed.deliverBatchedEvents() +} + func TestAckClosed(t *testing.T) { sub := &subscription{ @@ -1064,3 +1113,207 @@ func TestEventDispatcherWithReply(t *testing.T) { mbm.AssertExpectations(t) mms.AssertExpectations(t) } + +func TestEventDeliveryBatch(t *testing.T) { + 
log.SetLevel("debug") + var five = uint16(5) + truthy := true + sub := &subscription{ + dispatcherElection: make(chan bool, 1), + definition: &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ID: fftypes.NewUUID(), Namespace: "ns1", Name: "sub1"}, + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + ReadAhead: &five, + Batch: &truthy, + }, + }, + }, + eventMatcher: regexp.MustCompile(fmt.Sprintf("^%s|%s$", core.EventTypeMessageConfirmed, core.EventTypeMessageConfirmed)), + } + + ed, cancel := newTestEventDispatcher(sub) + cancel() + ed.acksNacks = make(chan ackNack, 5) + + event1 := fftypes.NewUUID() + ed.inflight[*event1] = &core.Event{ + ID: event1, + Namespace: "ns1", + } + + mms := &syncasyncmocks.Sender{} + mbm := ed.broadcast.(*broadcastmocks.Manager) + mbm.On("NewBroadcast", mock.Anything).Return(mms) + mms.On("Send", mock.Anything).Return(nil) + + ed.deliveryResponse(&core.EventDeliveryResponse{ + ID: event1, + Reply: &core.MessageInOut{ + Message: core.Message{ + Header: core.MessageHeader{ + Tag: "myreplytag1", + CID: fftypes.NewUUID(), + Type: core.MessageTypeBroadcast, + }, + }, + InlineData: core.InlineData{ + {Value: fftypes.JSONAnyPtr(`"my reply"`)}, + }, + }, + }) + + mbm.AssertExpectations(t) + mms.AssertExpectations(t) +} + +func TestEventDispatcherBatchReadAhead(t *testing.T) { + log.SetLevel("debug") + var five = uint16(5) + subID := fftypes.NewUUID() + truthy := true + oneSec := "1s" + sub := &subscription{ + dispatcherElection: make(chan bool, 1), + definition: &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ID: subID, Namespace: "ns1", Name: "sub1"}, + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + ReadAhead: &five, + Batch: &truthy, + BatchTimeout: &oneSec, + }, + }, + }, + eventMatcher: regexp.MustCompile(fmt.Sprintf("^%s|%s$", core.EventTypeMessageConfirmed, core.EventTypeMessageConfirmed)), + } + + ed, cancel := newTestEventDispatcher(sub) + defer cancel() + go ed.deliverBatchedEvents() + ed.eventPoller.offsetCommitted = make(chan int64, 3) + mdi := ed.database.(*databasemocks.Plugin) + mei := ed.transport.(*eventsmocks.Plugin) + mdm := ed.data.(*datamocks.Manager) + + eventDeliveries := make(chan *core.EventDelivery) + deliveryRequestMock := mei.On("BatchDeliveryRequest", ed.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + deliveryRequestMock.RunFn = func(a mock.Arguments) { + batchEvents := a.Get(3).([]*core.CombinedEventDataDelivery) + for _, event := range batchEvents { + eventDeliveries <- event.Event + } + } + + // Setup the IDs + ref1 := fftypes.NewUUID() + ev1 := fftypes.NewUUID() + ref2 := fftypes.NewUUID() + ev2 := fftypes.NewUUID() + ref3 := fftypes.NewUUID() + ev3 := fftypes.NewUUID() + ref4 := fftypes.NewUUID() + ev4 := fftypes.NewUUID() + + // Setup enrichment + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(&core.Message{ + Header: core.MessageHeader{ID: ref1}, + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref2).Return(&core.Message{ + Header: core.MessageHeader{ID: ref2}, + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref3).Return(&core.Message{ + Header: core.MessageHeader{ID: ref3}, + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref4).Return(&core.Message{ + Header: core.MessageHeader{ID: ref4}, + }, nil, true, nil) + + // Deliver a batch of messages + batch1Done := make(chan struct{}) + go func() { + repoll, err := 
ed.bufferedDelivery([]core.LocallySequenced{ + &core.Event{ID: ev1, Sequence: 10000001, Reference: ref1, Type: core.EventTypeMessageConfirmed}, // match + &core.Event{ID: ev2, Sequence: 10000002, Reference: ref2, Type: core.EventTypeMessageRejected}, + &core.Event{ID: ev3, Sequence: 10000003, Reference: ref3, Type: core.EventTypeMessageConfirmed}, // match + &core.Event{ID: ev4, Sequence: 10000004, Reference: ref4, Type: core.EventTypeMessageConfirmed}, // match + }) + assert.NoError(t, err) + assert.True(t, repoll) + close(batch1Done) + }() + + // Wait for the two calls to deliver the matching messages to the client (read ahead allows this) + event1 := <-eventDeliveries + assert.Equal(t, *ev1, *event1.ID) + assert.Equal(t, *ref1, *event1.Message.Header.ID) + event3 := <-eventDeliveries + assert.Equal(t, *ev3, *event3.ID) + assert.Equal(t, *ref3, *event3.Message.Header.ID) + event4 := <-eventDeliveries + assert.Equal(t, *ev4, *event4.ID) + assert.Equal(t, *ref4, *event4.Message.Header.ID) + + // Send back the two acks - out of order to validate the read-ahead logic + go func() { + ed.deliveryResponse(&core.EventDeliveryResponse{ID: event4.ID}) + ed.deliveryResponse(&core.EventDeliveryResponse{ID: event1.ID}) + ed.deliveryResponse(&core.EventDeliveryResponse{ID: event3.ID}) + }() + + // Confirm we get the offset updates in the correct order, even though the confirmations + // came in a different order from the app. + assert.Equal(t, int64(10000001), <-ed.eventPoller.offsetCommitted) + assert.Equal(t, int64(10000003), <-ed.eventPoller.offsetCommitted) + assert.Equal(t, int64(10000004), <-ed.eventPoller.offsetCommitted) + + // This should complete the batch + <-batch1Done + + mdi.AssertExpectations(t) + mei.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestBatchDeliverEventsWithDataFail(t *testing.T) { + yes := true + sub := &subscription{ + definition: &core.Subscription{ + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + WithData: &yes, + }, + }, + }, + } + + ed, cancel := newTestEventDispatcher(sub) + defer cancel() + + mdm := ed.data.(*datamocks.Manager) + mdm.On("GetMessageDataCached", ed.ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) + + id1 := fftypes.NewUUID() + ed.eventDelivery <- &core.EventDelivery{ + EnrichedEvent: core.EnrichedEvent{ + Event: core.Event{ + ID: id1, + }, + Message: &core.Message{ + Header: core.MessageHeader{ + ID: fftypes.NewUUID(), + }, + Data: core.DataRefs{ + {ID: fftypes.NewUUID()}, + }, + }, + }, + } + + ed.inflight[*id1] = &core.Event{ID: id1} + go ed.deliverBatchedEvents() + + an := <-ed.acksNacks + assert.True(t, an.isNack) + +} diff --git a/internal/events/event_enrich.go b/internal/events/event_enrich.go index ff6a581c3a..2036b202d8 100644 --- a/internal/events/event_enrich.go +++ b/internal/events/event_enrich.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
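The getChainListenerByProtocolIDCached change in blockchain_event.go above caches listener lookups, including misses, for the scope of a single event batch, so a batch full of events for an unknown listener costs one database query rather than one per event. A small, self-contained sketch of that comma-ok negative-caching pattern follows; listener, batchScope and lookupDB are stand-ins for core.ContractListener and the database call, not FireFly APIs.

package main

import "fmt"

type listener struct{ name string }

type batchScope struct {
	listenerResults map[string]*listener
}

// getListener uses the comma-ok form so a cached nil ("not found") result is
// distinguishable from "not looked up yet", and is reused for the whole batch.
func (bs *batchScope) getListener(protocolID string, lookupDB func(string) *listener) *listener {
	if l, seen := bs.listenerResults[protocolID]; seen {
		return l // may be nil: a cached miss
	}
	l := lookupDB(protocolID)
	bs.listenerResults[protocolID] = l // nil results are cached too
	return l
}

func main() {
	calls := 0
	db := func(id string) *listener {
		calls++
		if id == "known" {
			return &listener{name: id}
		}
		return nil
	}
	bs := &batchScope{listenerResults: map[string]*listener{}}
	for i := 0; i < 100; i++ {
		bs.getListener("unknown", db) // only the first call reaches the database
	}
	bs.getListener("known", db)
	fmt.Println("database lookups:", calls) // 2: one cached miss, one cached hit
}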
// // SPDX-License-Identifier: Apache-2.0 // @@ -110,7 +110,13 @@ func (em *eventEnricher) enrichEvent(ctx context.Context, event *core.Event) (*c return nil, err } e.TokenTransfer = transfer - case core.EventTypeApprovalOpFailed, core.EventTypeTransferOpFailed, core.EventTypeBlockchainInvokeOpFailed, core.EventTypePoolOpFailed, core.EventTypeBlockchainInvokeOpSucceeded: + case core.EventTypeApprovalOpFailed, + core.EventTypeTransferOpFailed, + core.EventTypePoolOpFailed, + core.EventTypeBlockchainInvokeOpFailed, + core.EventTypeBlockchainInvokeOpSucceeded, + core.EventTypeBlockchainContractDeployOpFailed, + core.EventTypeBlockchainContractDeployOpSucceeded: operation, err := em.operations.GetOperationByIDCached(ctx, event.Reference) if err != nil { return nil, err diff --git a/internal/events/event_manager.go b/internal/events/event_manager.go index 546504bf7a..0bf7a02e30 100644 --- a/internal/events/event_manager.go +++ b/internal/events/event_manager.go @@ -62,13 +62,12 @@ type EventManager interface { CreateUpdateDurableSubscription(ctx context.Context, subDef *core.Subscription, mustNew bool) (err error) EnrichEvent(ctx context.Context, event *core.Event) (*core.EnrichedEvent, error) QueueBatchRewind(batchID *fftypes.UUID) + ResolveTransportAndCapabilities(ctx context.Context, transportName string) (string, *events.Capabilities, error) Start() error WaitStop() // Bound blockchain callbacks - BatchPinComplete(namespace string, batch *blockchain.BatchPin, signingKey *core.VerifierRef) error - BlockchainEvent(event *blockchain.EventWithSubscription) error - BlockchainNetworkAction(action string, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef) error + BlockchainEventBatch(batch []*blockchain.EventToDispatch) error // Bound dataexchange callbacks DXEvent(plugin dataexchange.Plugin, event dataexchange.DXEvent) error @@ -172,7 +171,7 @@ func NewEventManager(ctx context.Context, ns *core.Namespace, di database.Plugin em.enricher = newEventEnricher(ns.Name, di, dm, om, txHelper) - if em.subManager, err = newSubscriptionManager(ctx, ns.Name, em.enricher, di, dm, newEventNotifier, bm, pm, txHelper, transports); err != nil { + if em.subManager, err = newSubscriptionManager(ctx, ns, em.enricher, di, dm, newEventNotifier, bm, pm, txHelper, transports); err != nil { return nil, err } @@ -210,6 +209,17 @@ func (em *eventManager) DeletedSubscriptions() chan<- *fftypes.UUID { return em.subManager.deletedSubscriptions } +func (em *eventManager) ResolveTransportAndCapabilities(ctx context.Context, transportName string) (string, *events.Capabilities, error) { + if transportName == "" { + transportName = em.defaultTransport + } + t, err := em.subManager.getTransport(ctx, transportName) + if err != nil { + return "", nil, err + } + return transportName, t.Capabilities(), nil +} + func (em *eventManager) WaitStop() { em.subManager.close() if em.blobReceiver != nil { @@ -226,10 +236,6 @@ func (em *eventManager) CreateUpdateDurableSubscription(ctx context.Context, sub return i18n.NewError(ctx, coremsgs.MsgInvalidSubscription) } - if subDef.Transport == "" { - subDef.Transport = em.defaultTransport - } - // Check it can be parsed before inserting (the submanager will check again when processing the creation, so we discard the result) if _, err = em.subManager.parseSubscriptionDef(ctx, subDef); err != nil { return err diff --git a/internal/events/event_manager_test.go b/internal/events/event_manager_test.go index 0a05c18ba1..df18f72768 100644 --- 
a/internal/events/event_manager_test.go +++ b/internal/events/event_manager_test.go @@ -146,7 +146,7 @@ func newTestEventManagerCommon(t *testing.T, metrics, dbconcurrency bool) *testE mbi.On("VerifierType").Return(core.VerifierTypeEthAddress).Maybe() mdi.On("Capabilities").Return(&database.Capabilities{Concurrency: dbconcurrency}).Maybe() mev.On("SetHandler", "ns1", mock.Anything).Return(nil).Maybe() - mev.On("ValidateOptions", mock.Anything).Return(nil).Maybe() + mev.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil).Maybe() ns := &core.Namespace{Name: "ns1", NetworkName: "ns1"} emi, err := NewEventManager(ctx, ns, mdi, mbi, mim, msh, mdm, mds, mbm, mpm, mam, msd, mmi, mom, txHelper, events, mmp, cmi) em := emi.(*eventManager) @@ -396,6 +396,7 @@ func TestCreateDurableSubscriptionDupName(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) sub := &core.Subscription{ + Transport: "websockets", SubscriptionRef: core.SubscriptionRef{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -411,6 +412,7 @@ func TestCreateDurableSubscriptionDefaultSubCannotParse(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) sub := &core.Subscription{ + Transport: "websockets", SubscriptionRef: core.SubscriptionRef{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -429,6 +431,7 @@ func TestCreateDurableSubscriptionBadFirstEvent(t *testing.T) { defer em.cleanup(t) wrongFirstEvent := core.SubOptsFirstEvent("lobster") sub := &core.Subscription{ + Transport: "websockets", SubscriptionRef: core.SubscriptionRef{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -450,6 +453,7 @@ func TestCreateDurableSubscriptionNegativeFirstEvent(t *testing.T) { defer em.cleanup(t) wrongFirstEvent := core.SubOptsFirstEvent("-12345") sub := &core.Subscription{ + Transport: "websockets", SubscriptionRef: core.SubscriptionRef{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -470,6 +474,7 @@ func TestCreateDurableSubscriptionGetHighestSequenceFailure(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) sub := &core.Subscription{ + Transport: "websockets", SubscriptionRef: core.SubscriptionRef{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -486,6 +491,7 @@ func TestCreateDurableSubscriptionOk(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) sub := &core.Subscription{ + Transport: "websockets", SubscriptionRef: core.SubscriptionRef{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -509,6 +515,7 @@ func TestUpdateDurableSubscriptionOk(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) sub := &core.Subscription{ + Transport: "websockets", SubscriptionRef: core.SubscriptionRef{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -517,6 +524,7 @@ func TestUpdateDurableSubscriptionOk(t *testing.T) { } var firstEvent core.SubOptsFirstEvent = "12345" em.mdi.On("GetSubscriptionByName", mock.Anything, "ns1", "sub1").Return(&core.Subscription{ + Transport: "websockets", SubscriptionRef: core.SubscriptionRef{ ID: fftypes.NewUUID(), }, @@ -603,3 +611,42 @@ func TestGetPlugins(t *testing.T) { assert.ElementsMatch(t, em.GetPlugins(), expectedPlugins) } + +func TestResolveTransportAndCapabilities(t *testing.T) { + em := newTestEventManager(t) + defer em.cleanup(t) + + em.mev.On("Capabilities").Return(&events.Capabilities{BatchDelivery: true}) + + resolved, c, err := em.ResolveTransportAndCapabilities(context.Background(), "websockets") + assert.NoError(t, err) + assert.Equal(t, "websockets", resolved) + assert.NotNil(t, c) + assert.True(t, c.BatchDelivery) + + em.mev.AssertExpectations(t) +} + +func 
TestResolveTransportAndCapabilitiesUnknown(t *testing.T) { + em := newTestEventManager(t) + defer em.cleanup(t) + + _, _, err := em.ResolveTransportAndCapabilities(context.Background(), "wrong") + assert.Regexp(t, "FF10172", err) +} + +func TestResolveTransportAndCapabilitiesDefault(t *testing.T) { + em := newTestEventManager(t) + defer em.cleanup(t) + em.defaultTransport = "websockets" + + em.mev.On("Capabilities").Return(&events.Capabilities{BatchDelivery: true}) + + resolved, c, err := em.ResolveTransportAndCapabilities(context.Background(), "") + assert.NoError(t, err) + assert.Equal(t, "websockets", resolved) + assert.NotNil(t, c) + assert.True(t, c.BatchDelivery) + + em.mev.AssertExpectations(t) +} diff --git a/internal/events/network_action.go b/internal/events/network_action.go index 8055692f8c..4ce32aaede 100644 --- a/internal/events/network_action.go +++ b/internal/events/network_action.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -17,50 +17,50 @@ package events import ( + "context" + "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/log" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" ) -func (em *eventManager) actionTerminate(location *fftypes.JSONAny, event *blockchain.Event) error { - return em.multiparty.TerminateContract(em.ctx, location, event) +func (em *eventManager) actionTerminate(ctx context.Context, location *fftypes.JSONAny, event *blockchain.Event) error { + return em.multiparty.TerminateContract(ctx, location, event) } -func (em *eventManager) BlockchainNetworkAction(action string, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef) error { +func (em *eventManager) handleBlockchainNetworkAction(ctx context.Context, event *blockchain.NetworkActionEvent, bc *eventBatchContext) error { if em.multiparty == nil { - log.L(em.ctx).Errorf("Ignoring network action from non-multiparty network!") + log.L(ctx).Errorf("Ignoring network action from non-multiparty network!") return nil } - return em.retry.Do(em.ctx, "handle network action", func(attempt int) (retry bool, err error) { - // Verify that the action came from a registered root org - resolvedAuthor, err := em.identity.FindIdentityForVerifier(em.ctx, []core.IdentityType{core.IdentityTypeOrg}, signingKey) - if err != nil { - return true, err - } - if resolvedAuthor == nil { - log.L(em.ctx).Errorf("Ignoring network action %s from unknown identity %s", action, signingKey.Value) - return false, nil - } - if resolvedAuthor.Parent != nil { - log.L(em.ctx).Errorf("Ignoring network action %s from non-root identity %s", action, signingKey.Value) - return false, nil - } + // Verify that the action came from a registered root org + resolvedAuthor, err := em.identity.FindIdentityForVerifier(ctx, []core.IdentityType{core.IdentityTypeOrg}, event.SigningKey) + if err != nil { + return err + } + if resolvedAuthor == nil { + log.L(ctx).Errorf("Ignoring network action %s from unknown identity %s", event.Action, event.SigningKey.Value) + return nil + } + if resolvedAuthor.Parent != nil { + log.L(ctx).Errorf("Ignoring network action %s from non-root identity %s", event.Action, event.SigningKey.Value) + return nil + } - if action == core.NetworkActionTerminate.String() { - err = em.actionTerminate(location, event) - } else { - log.L(em.ctx).Errorf("Ignoring unrecognized network action: %s", action) - return false, nil - } + if 
event.Action == core.NetworkActionTerminate.String() { + err = em.actionTerminate(ctx, event.Location, event.Event) + } else { + log.L(ctx).Errorf("Ignoring unrecognized network action: %s", event.Action) + return nil + } - if err == nil { - chainEvent := buildBlockchainEvent(em.namespace.Name, nil, event, &core.BlockchainTransactionRef{ - BlockchainID: event.BlockchainTXID, - }) - err = em.maybePersistBlockchainEvent(em.ctx, chainEvent, nil) - } - return true, err - }) + if err == nil { + chainEvent := buildBlockchainEvent(em.namespace.Name, nil, event.Event, &core.BlockchainTransactionRef{ + BlockchainID: event.Event.BlockchainTXID, + }) + bc.addEventToInsert(chainEvent, em.getTopicForChainListener(nil)) + } + return err } diff --git a/internal/events/network_action_test.go b/internal/events/network_action_test.go index bcd017d7e7..f69208116a 100644 --- a/internal/events/network_action_test.go +++ b/internal/events/network_action_test.go @@ -39,13 +39,23 @@ func TestNetworkAction(t *testing.T) { } em.mim.On("FindIdentityForVerifier", em.ctx, []core.IdentityType{core.IdentityTypeOrg}, verifier).Return(&core.Identity{}, nil) - em.mth.On("InsertOrGetBlockchainEvent", em.ctx, mock.MatchedBy(func(be *core.BlockchainEvent) bool { - return be.ProtocolID == "0001" - })).Return(nil, nil) + em.mth.On("InsertNewBlockchainEvents", em.ctx, mock.MatchedBy(func(be []*core.BlockchainEvent) bool { + return len(be) == 1 && be[0].ProtocolID == "0001" + })).Return([]*core.BlockchainEvent{{ID: fftypes.NewUUID()}}, nil) em.mdi.On("InsertEvent", em.ctx, mock.Anything).Return(nil) em.mmp.On("TerminateContract", em.ctx, location, mock.AnythingOfType("*blockchain.Event")).Return(nil) - err := em.BlockchainNetworkAction("terminate", location, event, verifier) + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeNetworkAction, + NetworkAction: &blockchain.NetworkActionEvent{ + Action: "terminate", + Location: location, + Event: event, + SigningKey: verifier, + }, + }, + }) assert.NoError(t, err) } @@ -62,7 +72,17 @@ func TestNetworkActionUnknownIdentity(t *testing.T) { em.mim.On("FindIdentityForVerifier", em.ctx, []core.IdentityType{core.IdentityTypeOrg}, verifier).Return(nil, fmt.Errorf("pop")).Once() em.mim.On("FindIdentityForVerifier", em.ctx, []core.IdentityType{core.IdentityTypeOrg}, verifier).Return(nil, nil).Once() - err := em.BlockchainNetworkAction("terminate", location, &blockchain.Event{}, verifier) + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeNetworkAction, + NetworkAction: &blockchain.NetworkActionEvent{ + Action: "terminate", + Location: location, + Event: &blockchain.Event{}, + SigningKey: verifier, + }, + }, + }) assert.NoError(t, err) } @@ -82,7 +102,17 @@ func TestNetworkActionNonRootIdentity(t *testing.T) { }, }, nil) - err := em.BlockchainNetworkAction("terminate", location, &blockchain.Event{}, verifier) + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeNetworkAction, + NetworkAction: &blockchain.NetworkActionEvent{ + Action: "terminate", + Location: location, + Event: &blockchain.Event{}, + SigningKey: verifier, + }, + }, + }) assert.NoError(t, err) } @@ -97,7 +127,17 @@ func TestNetworkActionNonMultiparty(t *testing.T) { Value: "0x1234", } - err := em.BlockchainNetworkAction("terminate", location, &blockchain.Event{}, verifier) + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeNetworkAction, + NetworkAction: 
&blockchain.NetworkActionEvent{ + Action: "terminate", + Location: location, + Event: &blockchain.Event{}, + SigningKey: verifier, + }, + }, + }) assert.NoError(t, err) } @@ -113,7 +153,17 @@ func TestNetworkActionUnknown(t *testing.T) { em.mim.On("FindIdentityForVerifier", em.ctx, []core.IdentityType{core.IdentityTypeOrg}, verifier).Return(&core.Identity{}, nil) - err := em.BlockchainNetworkAction("bad", location, &blockchain.Event{}, verifier) + err := em.BlockchainEventBatch([]*blockchain.EventToDispatch{ + { + Type: blockchain.EventTypeNetworkAction, + NetworkAction: &blockchain.NetworkActionEvent{ + Action: "bad", + Location: location, + Event: &blockchain.Event{}, + SigningKey: verifier, + }, + }, + }) assert.NoError(t, err) } @@ -125,6 +175,6 @@ func TestActionTerminateFail(t *testing.T) { em.mmp.On("TerminateContract", em.ctx, location, mock.AnythingOfType("*blockchain.Event")).Return(fmt.Errorf("pop")) - err := em.actionTerminate(location, &blockchain.Event{}) + err := em.actionTerminate(em.ctx, location, &blockchain.Event{}) assert.EqualError(t, err, "pop") } diff --git a/internal/events/persist_batch.go b/internal/events/persist_batch.go index b31a3bb05b..831cfb493e 100644 --- a/internal/events/persist_batch.go +++ b/internal/events/persist_batch.go @@ -177,7 +177,7 @@ func (em *eventManager) checkAndInitiateBlobDownloads(ctx context.Context, batch log.L(ctx).Errorf("Invalid data entry %d id=%s in batch '%s' - missing public blob reference", i, data.ID, batch.ID) return false, nil } - if err = em.sharedDownload.InitiateDownloadBlob(ctx, batch.Payload.TX.ID, data.ID, data.Blob.Public); err != nil { + if err = em.sharedDownload.InitiateDownloadBlob(ctx, batch.Payload.TX.ID, data.ID, data.Blob.Public, false /* batch processing does not currently use idempotency keys */); err != nil { return false, err } } diff --git a/internal/events/subscription_manager.go b/internal/events/subscription_manager.go index 0ce9bb8d8d..788e72ec31 100644 --- a/internal/events/subscription_manager.go +++ b/internal/events/subscription_manager.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
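The ResolveTransportAndCapabilities addition in event_manager.go above, together with the getTransport helper added to subscription_manager.go below, resolves an event transport by name, falling back to the namespace default when the name is empty, failing for unknown plugins, and reporting the transport's capabilities. A rough, self-contained sketch of that lookup, with stand-in types in place of events.Plugin and events.Capabilities:

package main

import (
	"errors"
	"fmt"
)

type capabilities struct{ BatchDelivery bool }

type transport struct{ caps capabilities }

type manager struct {
	defaultTransport string
	transports       map[string]*transport
}

// resolveTransport applies the default for an empty name, errors on unknown
// names, and returns the resolved name plus the transport's capabilities.
func (m *manager) resolveTransport(name string) (string, *capabilities, error) {
	if name == "" {
		name = m.defaultTransport
	}
	t, ok := m.transports[name]
	if !ok {
		return "", nil, errors.New("unknown event transport: " + name)
	}
	caps := t.caps
	return name, &caps, nil
}

func main() {
	m := &manager{
		defaultTransport: "websockets",
		transports: map[string]*transport{
			"websockets": {caps: capabilities{BatchDelivery: true}},
		},
	}
	name, caps, err := m.resolveTransport("")
	fmt.Println(name, caps.BatchDelivery, err) // websockets true <nil>
	_, _, err = m.resolveTransport("wrong")
	fmt.Println(err) // unknown event transport: wrong
}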
// // SPDX-License-Identifier: Apache-2.0 // @@ -73,7 +73,7 @@ type connection struct { type subscriptionManager struct { ctx context.Context - namespace string + namespace *core.Namespace enricher *eventEnricher database database.Plugin data data.Manager @@ -92,7 +92,7 @@ type subscriptionManager struct { retry retry.Retry } -func newSubscriptionManager(ctx context.Context, ns string, enricher *eventEnricher, di database.Plugin, dm data.Manager, en *eventNotifier, bm broadcast.Manager, pm privatemessaging.Manager, txHelper txcommon.Helper, transports map[string]events.Plugin) (*subscriptionManager, error) { +func newSubscriptionManager(ctx context.Context, ns *core.Namespace, enricher *eventEnricher, di database.Plugin, dm data.Manager, en *eventNotifier, bm broadcast.Manager, pm privatemessaging.Manager, txHelper txcommon.Helper, transports map[string]events.Plugin) (*subscriptionManager, error) { ctx, cancelCtx := context.WithCancel(ctx) sm := &subscriptionManager{ ctx: ctx, @@ -119,7 +119,7 @@ func newSubscriptionManager(ctx context.Context, ns string, enricher *eventEnric } for _, ei := range sm.transports { - if err := ei.SetHandler(sm.namespace, &boundCallbacks{sm: sm, ei: ei}); err != nil { + if err := ei.SetHandler(sm.namespace.Name, &boundCallbacks{sm: sm, ei: ei}); err != nil { return nil, err } } @@ -130,7 +130,7 @@ func newSubscriptionManager(ctx context.Context, ns string, enricher *eventEnric func (sm *subscriptionManager) start() error { fb := database.SubscriptionQueryFactory.NewFilter(sm.ctx) filter := fb.And().Limit(sm.maxSubs) - persistedSubs, _, err := sm.database.GetSubscriptions(sm.ctx, sm.namespace, filter) + persistedSubs, _, err := sm.database.GetSubscriptions(sm.ctx, sm.namespace.Name, filter) if err != nil { return err } @@ -169,7 +169,7 @@ func (sm *subscriptionManager) subscriptionEventListener() { func (sm *subscriptionManager) newOrUpdatedDurableSubscription(id *fftypes.UUID) { var subDef *core.Subscription err := sm.retry.Do(sm.ctx, "retrieve subscription", func(attempt int) (retry bool, err error) { - subDef, err = sm.database.GetSubscriptionByID(sm.ctx, sm.namespace, id) + subDef, err = sm.database.GetSubscriptionByID(sm.ctx, sm.namespace.Name, id) return err != nil, err // indefinite retry }) if err != nil || subDef == nil { @@ -249,15 +249,28 @@ func (sm *subscriptionManager) deletedDurableSubscription(id *fftypes.UUID) { } } +func (sm *subscriptionManager) getTransport(ctx context.Context, transportName string) (events.Plugin, error) { + transport, ok := sm.transports[transportName] + if !ok { + return nil, i18n.NewError(ctx, coremsgs.MsgUnknownEventTransportPlugin, transportName) + } + return transport, nil +} + +// nolint: gocyclo func (sm *subscriptionManager) parseSubscriptionDef(ctx context.Context, subDef *core.Subscription) (sub *subscription, err error) { filter := subDef.Filter - transport, ok := sm.transports[subDef.Transport] - if !ok { - return nil, i18n.NewError(ctx, coremsgs.MsgUnknownEventTransportPlugin, subDef.Transport) + transport, err := sm.getTransport(ctx, subDef.Transport) + if err != nil { + return nil, err + } + + if subDef.Options.TLSConfigName != "" && sm.namespace.TLSConfigs[subDef.Options.TLSConfigName] != nil { + subDef.Options.TLSConfig = sm.namespace.TLSConfigs[subDef.Options.TLSConfigName] } - if err := transport.ValidateOptions(&subDef.Options); err != nil { + if err := transport.ValidateOptions(ctx, &subDef.Options); err != nil { return nil, err } diff --git a/internal/events/subscription_manager_test.go 
b/internal/events/subscription_manager_test.go index 99ca3efc00..d72b8d1116 100644 --- a/internal/events/subscription_manager_test.go +++ b/internal/events/subscription_manager_test.go @@ -18,11 +18,13 @@ package events import ( "context" + "crypto/tls" "fmt" "testing" "time" "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/ffresty" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly/internal/cache" "github.com/hyperledger/firefly/internal/coreconfig" @@ -62,7 +64,7 @@ func newTestSubManager(t *testing.T, mei *eventsmocks.Plugin) (*subscriptionMana mei.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("GetEvents", mock.Anything, mock.Anything, mock.Anything).Return([]*core.Event{}, nil, nil).Maybe() mdi.On("GetOffset", mock.Anything, mock.Anything, mock.Anything).Return(&core.Offset{RowID: 3333333, Current: 0}, nil).Maybe() - sm, err := newSubscriptionManager(ctx, "ns1", enricher, mdi, mdm, newEventNotifier(ctx, "ut"), mbm, mpm, txHelper, nil) + sm, err := newSubscriptionManager(ctx, &core.Namespace{Name: "ns1"}, enricher, mdi, mdm, newEventNotifier(ctx, "ut"), mbm, mpm, txHelper, nil) assert.NoError(t, err) sm.transports = map[string]events.Plugin{ "ut": mei, @@ -93,7 +95,7 @@ func TestRegisterDurableSubscriptions(t *testing.T) { ID: sub2, }, Transport: "ut"}, }, nil, nil) - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) err := sm.start() assert.NoError(t, err) @@ -151,7 +153,7 @@ func TestReloadDurableSubscription(t *testing.T) { Name: "sub1", }, Transport: "ut"}, }, nil, nil) - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) err := sm.start() assert.NoError(t, err) @@ -168,7 +170,7 @@ func TestRegisterEphemeralSubscriptions(t *testing.T) { mdi := sm.database.(*databasemocks.Plugin) mdi.On("GetSubscriptions", mock.Anything, "ns1", mock.Anything).Return([]*core.Subscription{}, nil, nil) - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) err := sm.start() assert.NoError(t, err) @@ -197,7 +199,7 @@ func TestRegisterEphemeralSubscriptionsFail(t *testing.T) { mdi := sm.database.(*databasemocks.Plugin) mdi.On("GetSubscriptions", mock.Anything, "ns1", mock.Anything).Return([]*core.Subscription{}, nil, nil) - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) err := sm.start() assert.NoError(t, err) be := &boundCallbacks{sm: sm, ei: mei} @@ -288,7 +290,7 @@ func TestCreateSubscriptionBadTransportOptions(t *testing.T) { Options: core.SubscriptionOptions{}, } sub.Options.TransportOptions()["myoption"] = "badvalue" - mei.On("ValidateOptions", mock.MatchedBy(func(opts *core.SubscriptionOptions) bool { + mei.On("ValidateOptions", mock.Anything, mock.MatchedBy(func(opts *core.SubscriptionOptions) bool { return opts.TransportOptions()["myoption"] == "badvalue" })).Return(fmt.Errorf("pop")) _, err := sm.parseSubscriptionDef(sm.ctx, sub) @@ -299,7 +301,7 @@ func TestCreateSubscriptionBadEventilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ 
Events: "[[[[! badness", @@ -313,7 +315,7 @@ func TestCreateSubscriptionBadTopicFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ Topic: "[[[[! badness", @@ -327,7 +329,7 @@ func TestCreateSubscriptionBadGroupFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ Message: core.MessageFilter{ @@ -343,7 +345,7 @@ func TestCreateSubscriptionBadAuthorFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ Message: core.MessageFilter{ @@ -359,7 +361,7 @@ func TestCreateSubscriptionBadTxTypeFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ Transaction: core.TransactionFilter{ @@ -375,7 +377,7 @@ func TestCreateSubscriptionBadBlockchainEventNameFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ BlockchainEvent: core.BlockchainEventFilter{ @@ -391,7 +393,7 @@ func TestCreateSubscriptionBadDeprecatedGroupFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ DeprecatedGroup: "[[[[! badness", @@ -405,7 +407,7 @@ func TestCreateSubscriptionBadDeprecatedTagFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ DeprecatedTag: "[[[[! 
badness", @@ -419,7 +421,7 @@ func TestCreateSubscriptionBadMessageTagFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ Message: core.MessageFilter{ @@ -435,7 +437,7 @@ func TestCreateSubscriptionBadDeprecatedAuthorFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ DeprecatedAuthor: "[[[[! badness", @@ -449,7 +451,7 @@ func TestCreateSubscriptionBadDeprecatedTopicsFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ DeprecatedTopics: "[[[[! badness", @@ -463,7 +465,7 @@ func TestCreateSubscriptionBadBlockchainEventListenerFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ BlockchainEvent: core.BlockchainEventFilter{ @@ -479,7 +481,7 @@ func TestCreateSubscriptionSuccessMessageFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ Message: core.MessageFilter{ @@ -495,7 +497,7 @@ func TestCreateSubscriptionSuccessTxFilter(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ Transaction: core.TransactionFilter{ @@ -511,7 +513,7 @@ func TestCreateSubscriptionSuccessBlockchainEvent(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ BlockchainEvent: core.BlockchainEventFilter{ @@ -523,11 +525,37 @@ func TestCreateSubscriptionSuccessBlockchainEvent(t *testing.T) { assert.NoError(t, err) } +func TestCreateSubscriptionSuccessTLSConfig(t *testing.T) { + coreconfig.Reset() + + mei := &eventsmocks.Plugin{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + + sm.namespace.TLSConfigs = map[string]*tls.Config{ + "myconfig": {}, + } + + mei.On("GetFFRestyConfig", mock.Anything).Return(&ffresty.Config{}) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) + sub, err := sm.parseSubscriptionDef(sm.ctx, 
&core.Subscription{ + Options: core.SubscriptionOptions{ + WebhookSubOptions: core.WebhookSubOptions{ + TLSConfigName: "myconfig", + }, + }, + Transport: "ut", + }) + assert.NoError(t, err) + + assert.NotNil(t, sub.definition.Options.TLSConfig) +} + func TestCreateSubscriptionWithDeprecatedFilters(t *testing.T) { mei := &eventsmocks.Plugin{} sm, cancel := newTestSubManager(t, mei) defer cancel() - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &core.Subscription{ Filter: core.SubscriptionFilter{ Topic: "flop", @@ -548,7 +576,7 @@ func TestDispatchDeliveryResponseOK(t *testing.T) { defer cancel() mdi := sm.database.(*databasemocks.Plugin) mdi.On("GetSubscriptions", mock.Anything, "ns1", mock.Anything).Return([]*core.Subscription{}, nil, nil) - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) err := sm.start() assert.NoError(t, err) be := &boundCallbacks{sm: sm, ei: mei} @@ -673,7 +701,7 @@ func TestNewDurableSubscriptionOK(t *testing.T) { sm, cancel := newTestSubManager(t, mei) defer cancel() mdi := sm.database.(*databasemocks.Plugin) - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) sm.connections["conn1"] = &connection{ ei: mei, @@ -705,7 +733,7 @@ func TestUpdatedDurableSubscriptionNoOp(t *testing.T) { sm, cancel := newTestSubManager(t, mei) defer cancel() mdi := sm.database.(*databasemocks.Plugin) - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) subID := fftypes.NewUUID() sub := &core.Subscription{ @@ -747,7 +775,7 @@ func TestUpdatedDurableSubscriptionOK(t *testing.T) { sm, cancel := newTestSubManager(t, mei) defer cancel() mdi := sm.database.(*databasemocks.Plugin) - mei.On("ValidateOptions", mock.Anything).Return(nil) + mei.On("ValidateOptions", mock.Anything, mock.Anything).Return(nil) subID := fftypes.NewUUID() sub := &core.Subscription{ diff --git a/internal/events/system/events.go b/internal/events/system/events.go index 3f09a94005..354be298d7 100644 --- a/internal/events/system/events.go +++ b/internal/events/system/events.go @@ -23,6 +23,8 @@ import ( "github.com/hyperledger/firefly-common/pkg/config" "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly-common/pkg/i18n" + "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/events" ) @@ -73,6 +75,10 @@ func (se *Events) Init(ctx context.Context, config config.Section) (err error) { func (se *Events) SetHandler(namespace string, handler events.Callbacks) error { se.callbacks.writeLock.Lock() defer se.callbacks.writeLock.Unlock() + if handler == nil { + delete(se.callbacks.handlers, namespace) + return nil + } se.callbacks.handlers[namespace] = handler // We have a single logical connection, that matches all subscriptions return handler.RegisterConnection(se.connID, func(sr core.SubscriptionRef) bool { return true }) @@ -82,7 +88,7 @@ func (se *Events) Capabilities() *events.Capabilities { return se.capabilities } -func (se *Events) ValidateOptions(options *core.SubscriptionOptions) error { +func (se *Events) ValidateOptions(ctx context.Context, options *core.SubscriptionOptions) error { return nil } @@ -107,7 +113,7 @@ func (se *Events) AddListener(ns string, el EventListener) error { 
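// Illustrative sketch (not part of the diff above). parseSubscriptionDef in
// subscription_manager.go above now lets a subscription reference one of the
// namespace's pre-built TLS configurations by name (options.TLSConfigName),
// copying the resolved config onto the subscription options so a transport
// such as webhooks can use it per delivery - the behaviour the
// TestCreateSubscriptionSuccessTLSConfig test above asserts. The helper below
// restates only that resolution step; resolveSubscriptionTLS is a
// hypothetical name, while the field names match the diff and its test.
package example

import (
	"crypto/tls"

	"github.com/hyperledger/firefly/pkg/core"
)

// resolveSubscriptionTLS attaches a namespace-level *tls.Config to the
// subscription options when one is requested by name and actually exists.
func resolveSubscriptionTLS(tlsConfigs map[string]*tls.Config, opts *core.SubscriptionOptions) {
	if opts.TLSConfigName == "" {
		return // no named TLS config requested on this subscription
	}
	if cfg := tlsConfigs[opts.TLSConfigName]; cfg != nil {
		opts.TLSConfig = cfg // resolved config carried on the parsed subscription
	}
}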
return nil } -func (se *Events) DeliveryRequest(connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error { +func (se *Events) DeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error { se.mux.Lock() defer se.mux.Unlock() for ns, listeners := range se.listeners { @@ -130,6 +136,10 @@ func (se *Events) DeliveryRequest(connID string, sub *core.Subscription, event * return nil } +func (se *Events) BatchDeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, events []*core.CombinedEventDataDelivery) error { + return i18n.NewError(ctx, coremsgs.MsgBatchDeliveryNotSupported, se.Name()) // should never happen +} + func (se *Events) NamespaceRestarted(ns string, startTime time.Time) { // no-op } diff --git a/internal/events/system/events_test.go b/internal/events/system/events_test.go index 0aace7dc1d..69f6c3bfb9 100644 --- a/internal/events/system/events_test.go +++ b/internal/events/system/events_test.go @@ -47,7 +47,7 @@ func newTestEvents(t *testing.T) (se *Events, cancel func()) { se.SetHandler("ns1", cbs) assert.Equal(t, "system", se.Name()) assert.NotNil(t, se.Capabilities()) - assert.Nil(t, se.ValidateOptions(&core.SubscriptionOptions{})) + assert.Nil(t, se.ValidateOptions(ctx, &core.SubscriptionOptions{})) return se, cancelCtx } @@ -73,7 +73,7 @@ func TestDeliveryRequestOk(t *testing.T) { }) assert.NoError(t, err) - err = se.DeliveryRequest(se.connID, sub, &core.EventDelivery{ + err = se.DeliveryRequest(se.ctx, se.connID, sub, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ Namespace: "ns1", @@ -82,7 +82,7 @@ func TestDeliveryRequestOk(t *testing.T) { }, nil) assert.NoError(t, err) - err = se.DeliveryRequest(se.connID, &core.Subscription{}, &core.EventDelivery{ + err = se.DeliveryRequest(se.ctx, se.connID, &core.Subscription{}, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ Namespace: "ns2", @@ -94,6 +94,9 @@ func TestDeliveryRequestOk(t *testing.T) { assert.Equal(t, 1, called) cbs.AssertExpectations(t) + se.SetHandler("ns1", nil) + assert.Empty(t, se.callbacks.handlers) + } func TestDeliveryRequestFail(t *testing.T) { @@ -109,7 +112,7 @@ func TestDeliveryRequestFail(t *testing.T) { }) assert.NoError(t, err) - err = se.DeliveryRequest(mock.Anything, &core.Subscription{}, &core.EventDelivery{ + err = se.DeliveryRequest(se.ctx, mock.Anything, &core.Subscription{}, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ Namespace: "ns1", @@ -139,3 +142,17 @@ func TestNamespaceRestarted(t *testing.T) { se.NamespaceRestarted("ns1", time.Now()) } + +func TestEventDeliveryBatch(t *testing.T) { + se, cancel := newTestEvents(t) + defer cancel() + + sub := &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Namespace: "ns1", + }, + } + + err := se.BatchDeliveryRequest(se.ctx, "id", sub, []*core.CombinedEventDataDelivery{}) + assert.Regexp(t, "FF10461", err) +} diff --git a/internal/events/token_pool_created.go b/internal/events/token_pool_created.go index 6f8c2ed4cc..7fa7ef1a5e 100644 --- a/internal/events/token_pool_created.go +++ b/internal/events/token_pool_created.go @@ -25,12 +25,14 @@ import ( "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" + "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/tokens" ) func addPoolDetailsFromPlugin(ffPool *core.TokenPool, 
pluginPool *tokens.TokenPool) error { ffPool.Type = pluginPool.Type ffPool.Locator = pluginPool.PoolLocator + ffPool.PluginData = pluginPool.PluginData ffPool.Connector = pluginPool.Connector ffPool.Standard = pluginPool.Standard ffPool.InterfaceFormat = (core.TokenInterfaceFormat)(pluginPool.InterfaceFormat) @@ -50,7 +52,7 @@ func addPoolDetailsFromPlugin(ffPool *core.TokenPool, pluginPool *tokens.TokenPo } func (em *eventManager) confirmPool(ctx context.Context, pool *core.TokenPool, ev *blockchain.Event) error { - log.L(ctx).Debugf("Confirming pool ID=%s Locator='%s'", pool.ID, pool.Locator) + log.L(ctx).Debugf("Confirming token pool ID='%s' Locator='%s'", pool.ID, pool.Locator) var blockchainID string if ev != nil { // Some pools will not include a blockchain event for creation (such as when indexing a pre-existing pool) @@ -68,8 +70,8 @@ func (em *eventManager) confirmPool(ctx context.Context, pool *core.TokenPool, e if _, err := em.txHelper.PersistTransaction(ctx, pool.TX.ID, pool.TX.Type, blockchainID); err != nil { return err } - pool.State = core.TokenPoolStateConfirmed - if err := em.database.UpsertTokenPool(ctx, pool); err != nil { + pool.Active = true + if err := em.database.UpsertTokenPool(ctx, pool, database.UpsertOptimizationExisting); err != nil { return err } log.L(ctx).Infof("Token pool confirmed, id=%s", pool.ID) @@ -77,8 +79,15 @@ func (em *eventManager) confirmPool(ctx context.Context, pool *core.TokenPool, e return em.database.InsertEvent(ctx, event) } +func (em *eventManager) getPoolByIDOrLocator(ctx context.Context, id *fftypes.UUID, connector, locator string) (*core.TokenPool, error) { + if id != nil { + return em.assets.GetTokenPoolByID(ctx, id) + } + return em.assets.GetTokenPoolByLocator(ctx, connector, locator) +} + func (em *eventManager) loadExisting(ctx context.Context, pool *tokens.TokenPool) (existingPool *core.TokenPool, err error) { - if existingPool, err = em.database.GetTokenPoolByLocator(ctx, em.namespace.Name, pool.Connector, pool.PoolLocator); err != nil || existingPool == nil { + if existingPool, err = em.getPoolByIDOrLocator(ctx, pool.ID, pool.Connector, pool.PoolLocator); err != nil || existingPool == nil { log.L(ctx).Debugf("Pool not found with ns=%s connector=%s locator=%s (err=%v)", em.namespace.Name, pool.Connector, pool.PoolLocator, err) return existingPool, err } @@ -90,7 +99,7 @@ func (em *eventManager) loadExisting(ctx context.Context, pool *tokens.TokenPool return existingPool, nil } -func (em *eventManager) loadFromOperation(ctx context.Context, pool *tokens.TokenPool) (announcePool *core.TokenPool, err error) { +func (em *eventManager) loadFromOperation(ctx context.Context, pool *tokens.TokenPool) (stagedPool *core.TokenPool, err error) { op, err := em.txHelper.FindOperationInTransaction(ctx, pool.TX.ID, core.OpTypeTokenCreatePool) if err != nil { return nil, err @@ -99,57 +108,59 @@ func (em *eventManager) loadFromOperation(ctx context.Context, pool *tokens.Toke return nil, nil } - announcePool, err = txcommon.RetrieveTokenPoolCreateInputs(ctx, op) - if err != nil || announcePool.ID == nil || announcePool.Namespace == "" || announcePool.Name == "" { + stagedPool, err = txcommon.RetrieveTokenPoolCreateInputs(ctx, op) + if err != nil || stagedPool.ID == nil || stagedPool.Namespace == "" || stagedPool.Name == "" { log.L(ctx).Errorf("Error loading pool info for transaction '%s' (%s) - ignoring: %v", pool.TX.ID, err, op.Input) return nil, nil } - if err = addPoolDetailsFromPlugin(announcePool, pool); err != nil { + if err = 
addPoolDetailsFromPlugin(stagedPool, pool); err != nil { log.L(ctx).Errorf("Error processing pool for transaction '%s' (%s) - ignoring", pool.TX.ID, err) return nil, nil } - return announcePool, nil + return stagedPool, nil } // It is expected that this method might be invoked twice for each pool, depending on the behavior of the connector. -// It will be at least invoked on the submitter when the pool is first created, to trigger the submitter to announce it. -// It will be invoked on every node (including the submitter) after the pool is announced+activated, to trigger confirmation of the pool. +// It will be at least invoked on the submitter when the pool is first created, to trigger the submitter to publish it. +// It will be invoked on every node (including the submitter) after the pool is published+activated, to trigger confirmation of the pool. // When received in any other scenario, it should be ignored. // // The context passed to this callback is dependent on what phase it is called in. -// In the case that it is called synchronously on the submitter, in order to trigger the announcement, the original context +// In the case that it is called synchronously on the submitter, in order to trigger the publish, the original context // of the REST API will be propagated (so it can be used for the resolution of the org signing key). func (em *eventManager) TokenPoolCreated(ctx context.Context, ti tokens.Plugin, pool *tokens.TokenPool) (err error) { var msgIDforRewind *fftypes.UUID - var announcePool *core.TokenPool + var stagedPool *core.TokenPool err = em.retry.Do(ctx, "persist token pool transaction", func(attempt int) (bool, error) { err := em.database.RunAsGroup(ctx, func(ctx context.Context) error { - // See if this is a confirmation of an unconfirmed pool + // See if this is the result of activating an existing pool existingPool, err := em.loadExisting(ctx, pool) if err != nil { return err } if existingPool != nil { - if existingPool.State == core.TokenPoolStateConfirmed { - log.L(ctx).Debugf("Token pool ID=%s Locator='%s' already confirmed", existingPool.ID, pool.PoolLocator) - return nil // already confirmed + if existingPool.Active { + log.L(ctx).Debugf("Token pool already active ID='%s' Locator='%s'", existingPool.ID, pool.PoolLocator) + return nil // already active } msgIDforRewind = existingPool.Message return em.confirmPool(ctx, existingPool, pool.Event) - } else if pool.TX.ID == nil { - // TransactionID is required if the pool doesn't exist yet - // (but it may be omitted when activating a pool that was received via definition broadcast) + } + + if pool.TX.ID == nil { + // Transaction ID is required if the pool doesn't exist yet + // (it can be omitted above when only activating) log.L(ctx).Errorf("Invalid token pool transaction - ID is nil") return nil // move on } - // See if this pool was submitted locally and needs to be announced - if announcePool, err = em.loadFromOperation(ctx, pool); err != nil { + // See if this pool was submitted locally and needs to be published + if stagedPool, err = em.loadFromOperation(ctx, pool); err != nil { return err - } else if announcePool != nil { - return nil // trigger announce after completion of database transaction + } else if stagedPool != nil { + return nil // trigger publish after completion of database transaction } // Otherwise this event can be ignored @@ -157,7 +168,7 @@ func (em *eventManager) TokenPoolCreated(ctx context.Context, ti tokens.Plugin, if pool.Event != nil { protoID = pool.Event.ProtocolID } - 
log.L(ctx).Debugf("Handler ignoring token pool created notification. Pool is not active for namespace event='%s' locator='%s'", protoID, pool.PoolLocator) + log.L(ctx).Debugf("Ignoring token pool created notification. No matching pool definition found Event='%s' Locator='%s'", protoID, pool.PoolLocator) return nil }) return err != nil, err @@ -169,21 +180,19 @@ func (em *eventManager) TokenPoolCreated(ctx context.Context, ti tokens.Plugin, em.aggregator.queueMessageRewind(msgIDforRewind) } - if announcePool != nil { + if stagedPool != nil { // If the pool is tied to a contract interface, resolve the methods to be used for later operations - if announcePool.Interface != nil && announcePool.Interface.ID != nil && announcePool.InterfaceFormat != "" { - log.L(ctx).Infof("Querying token connector interface, id=%s", announcePool.ID) - if err := em.assets.ResolvePoolMethods(ctx, announcePool); err != nil { + if stagedPool.Interface != nil && stagedPool.Interface.ID != nil && stagedPool.InterfaceFormat != "" { + log.L(ctx).Infof("Querying token connector interface, id=%s", stagedPool.ID) + if err := em.assets.ResolvePoolMethods(ctx, stagedPool); err != nil { return err } } - // Announce the details of the new token pool + // Publish the details of the new token pool // Other nodes will pass these details to their own token connector for validation/activation of the pool - log.L(ctx).Infof("Announcing token pool, id=%s", announcePool.ID) - err = em.defsender.DefineTokenPool(ctx, &core.TokenPoolAnnouncement{ - Pool: announcePool, - }, false) + log.L(ctx).Infof("Defining token pool, id=%s", stagedPool.ID) + err = em.defsender.DefineTokenPool(ctx, stagedPool, false) } } diff --git a/internal/events/token_pool_created_test.go b/internal/events/token_pool_created_test.go index 7d0597957d..2f60f9a3e7 100644 --- a/internal/events/token_pool_created_test.go +++ b/internal/events/token_pool_created_test.go @@ -24,6 +24,7 @@ import ( "github.com/hyperledger/firefly/mocks/tokenmocks" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" + "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/tokens" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -51,7 +52,7 @@ func TestTokenPoolCreatedIgnore(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(nil, nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(nil, nil, nil) em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(nil, nil) err := em.TokenPoolCreated(em.ctx, mti, pool) @@ -76,7 +77,7 @@ func TestTokenPoolCreatedIgnoreNoTX(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(nil, nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(nil, nil, nil) err := em.TokenPoolCreated(em.ctx, mti, pool) assert.NoError(t, err) @@ -111,7 +112,7 @@ func TestTokenPoolCreatedConfirm(t *testing.T) { storedPool := &core.TokenPool{ Namespace: "ns1", ID: fftypes.NewUUID(), - State: core.TokenPoolStatePending, + Active: false, Message: fftypes.NewUUID(), TX: core.TransactionRef{ Type: core.TransactionTypeTokenPool, @@ -119,16 +120,16 @@ func TestTokenPoolCreatedConfirm(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(nil, fmt.Errorf("pop")).Once() - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(storedPool, nil).Once() + 
em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(nil, fmt.Errorf("pop")).Once() + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(storedPool, nil).Once() em.mth.On("InsertOrGetBlockchainEvent", em.ctx, mock.MatchedBy(func(e *core.BlockchainEvent) bool { return e.Name == chainPool.Event.Name - })).Return(nil, nil).Once() + })).Return(nil, nil) em.mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(e *core.Event) bool { return e.Type == core.EventTypeBlockchainEventReceived })).Return(nil).Once() em.mth.On("PersistTransaction", mock.Anything, txID, core.TransactionTypeTokenPool, "0xffffeeee").Return(true, nil).Once() - em.mdi.On("UpsertTokenPool", em.ctx, storedPool).Return(nil).Once() + em.mdi.On("UpsertTokenPool", em.ctx, storedPool, database.UpsertOptimizationExisting).Return(nil).Once() em.mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(e *core.Event) bool { return e.Type == core.EventTypePoolConfirmed && *e.Reference == *storedPool.ID })).Return(nil).Once() @@ -166,14 +167,14 @@ func TestTokenPoolCreatedAlreadyConfirmed(t *testing.T) { storedPool := &core.TokenPool{ Namespace: "ns1", ID: fftypes.NewUUID(), - State: core.TokenPoolStateConfirmed, + Active: true, TX: core.TransactionRef{ Type: core.TransactionTypeTokenPool, ID: txID, }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(storedPool, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(storedPool, nil) err := em.TokenPoolCreated(em.ctx, mti, chainPool) assert.NoError(t, err) @@ -206,7 +207,7 @@ func TestTokenPoolCreatedConfirmFailBadSymbol(t *testing.T) { storedPool := &core.TokenPool{ Namespace: "ns1", ID: fftypes.NewUUID(), - State: core.TokenPoolStatePending, + Active: false, Symbol: "FFT", TX: core.TransactionRef{ Type: core.TransactionTypeTokenPool, @@ -214,7 +215,7 @@ func TestTokenPoolCreatedConfirmFailBadSymbol(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(storedPool, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(storedPool, nil) em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(&core.Operation{ ID: opID, }, nil) @@ -233,7 +234,7 @@ func TestConfirmPoolBlockchainEventFail(t *testing.T) { Namespace: "ns1", ID: fftypes.NewUUID(), Key: "0x0", - State: core.TokenPoolStatePending, + Active: false, TX: core.TransactionRef{ Type: core.TransactionTypeTokenPool, ID: txID, @@ -263,7 +264,7 @@ func TestConfirmPoolTxFail(t *testing.T) { Namespace: "ns1", ID: fftypes.NewUUID(), Key: "0x0", - State: core.TokenPoolStatePending, + Active: false, TX: core.TransactionRef{ Type: core.TransactionTypeTokenPool, ID: txID, @@ -297,7 +298,7 @@ func TestConfirmPoolUpsertFail(t *testing.T) { Namespace: "ns1", ID: fftypes.NewUUID(), Key: "0x0", - State: core.TokenPoolStatePending, + Active: false, TX: core.TransactionRef{ Type: core.TransactionTypeTokenPool, ID: txID, @@ -316,14 +317,14 @@ func TestConfirmPoolUpsertFail(t *testing.T) { return e.Type == core.EventTypeBlockchainEventReceived })).Return(nil) em.mth.On("PersistTransaction", mock.Anything, txID, core.TransactionTypeTokenPool, "0xffffeeee").Return(true, nil).Once() - em.mdi.On("UpsertTokenPool", em.ctx, storedPool).Return(fmt.Errorf("pop")) + em.mdi.On("UpsertTokenPool", em.ctx, storedPool, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) err := em.confirmPool(em.ctx, storedPool, event) assert.EqualError(t, err, "pop") } -func TestTokenPoolCreatedAnnounce(t 
*testing.T) { +func TestTokenPoolCreatedPublish(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) mti := &tokenmocks.Plugin{} @@ -354,11 +355,11 @@ func TestTokenPoolCreatedAnnounce(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(nil, nil).Times(2) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(nil, nil).Times(2) em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(nil, fmt.Errorf("pop")).Once() em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(operation, nil).Once() - em.mds.On("DefineTokenPool", em.ctx, mock.MatchedBy(func(pool *core.TokenPoolAnnouncement) bool { - return pool.Pool.Namespace == "ns1" && pool.Pool.Name == "my-pool" && *pool.Pool.ID == *poolID + em.mds.On("DefineTokenPool", em.ctx, mock.MatchedBy(func(pool *core.TokenPool) bool { + return pool.Namespace == "ns1" && pool.Name == "my-pool" && *pool.ID == *poolID }), false).Return(nil, nil) err := em.TokenPoolCreated(em.ctx, mti, pool) @@ -367,7 +368,7 @@ func TestTokenPoolCreatedAnnounce(t *testing.T) { mti.AssertExpectations(t) } -func TestTokenPoolCreatedAnnounceBadInterface(t *testing.T) { +func TestTokenPoolCreatedPublishBadInterface(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) mti := &tokenmocks.Plugin{} @@ -403,7 +404,7 @@ func TestTokenPoolCreatedAnnounceBadInterface(t *testing.T) { InterfaceFormat: "abi", } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(nil, nil).Times(2) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(nil, nil).Times(2) em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(nil, fmt.Errorf("pop")).Once() em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(operation, nil).Once() em.mam.On("ResolvePoolMethods", em.ctx, mock.MatchedBy(func(pool *core.TokenPool) bool { @@ -416,7 +417,7 @@ func TestTokenPoolCreatedAnnounceBadInterface(t *testing.T) { mti.AssertExpectations(t) } -func TestTokenPoolCreatedAnnounceBadOpInputID(t *testing.T) { +func TestTokenPoolCreatedPublishBadOpInputID(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) mti := &tokenmocks.Plugin{} @@ -443,7 +444,7 @@ func TestTokenPoolCreatedAnnounceBadOpInputID(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(nil, nil) em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(operation, nil) err := em.TokenPoolCreated(em.ctx, mti, pool) @@ -451,7 +452,7 @@ func TestTokenPoolCreatedAnnounceBadOpInputID(t *testing.T) { } -func TestTokenPoolCreatedAnnounceBadOpInputNS(t *testing.T) { +func TestTokenPoolCreatedPublishBadOpInputNS(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) mti := &tokenmocks.Plugin{} @@ -480,7 +481,7 @@ func TestTokenPoolCreatedAnnounceBadOpInputNS(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(nil, nil) em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(operation, nil) err := em.TokenPoolCreated(em.ctx, mti, pool) @@ -488,7 +489,7 @@ func TestTokenPoolCreatedAnnounceBadOpInputNS(t *testing.T) { } -func TestTokenPoolCreatedAnnounceBadSymbol(t *testing.T) { +func 
TestTokenPoolCreatedPublishBadSymbol(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) mti := &tokenmocks.Plugin{} @@ -521,7 +522,7 @@ func TestTokenPoolCreatedAnnounceBadSymbol(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "123").Return(nil, nil).Times(2) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "123").Return(nil, nil).Times(2) em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(nil, fmt.Errorf("pop")).Once() em.mth.On("FindOperationInTransaction", em.ctx, txID, core.OpTypeTokenCreatePool).Return(operation, nil).Once() diff --git a/internal/events/tokens_approved.go b/internal/events/tokens_approved.go index e5000ac52d..f93b719681 100644 --- a/internal/events/tokens_approved.go +++ b/internal/events/tokens_approved.go @@ -62,8 +62,7 @@ func (em *eventManager) loadApprovalID(ctx context.Context, tx *fftypes.UUID, ap func (em *eventManager) persistTokenApproval(ctx context.Context, approval *tokens.TokenApproval) (valid bool, err error) { // Check that this is from a known pool - // TODO: should cache this lookup for efficiency - pool, err := em.database.GetTokenPoolByLocator(ctx, em.namespace.Name, approval.Connector, approval.PoolLocator) + pool, err := em.getPoolByIDOrLocator(ctx, approval.Pool, approval.Connector, approval.PoolLocator) if err != nil { return false, err } @@ -75,7 +74,7 @@ func (em *eventManager) persistTokenApproval(ctx context.Context, approval *toke approval.Pool = pool.ID // Check that approval has not already been recorded - if existing, err := em.database.GetTokenApprovalByProtocolID(ctx, em.namespace.Name, approval.Connector, approval.ProtocolID); err != nil { + if existing, err := em.database.GetTokenApprovalByProtocolID(ctx, em.namespace.Name, approval.Pool, approval.ProtocolID); err != nil { return false, err } else if existing != nil { log.L(ctx).Warnf("Token approval '%s' has already been recorded - ignoring", approval.ProtocolID) diff --git a/internal/events/tokens_approved_test.go b/internal/events/tokens_approved_test.go index 4894986350..1403e65b79 100644 --- a/internal/events/tokens_approved_test.go +++ b/internal/events/tokens_approved_test.go @@ -34,7 +34,6 @@ func newApproval() *tokens.TokenApproval { PoolLocator: "F1", TokenApproval: core.TokenApproval{ LocalID: fftypes.NewUUID(), - Pool: fftypes.NewUUID(), Connector: "erc1155", Namespace: "ns1", Key: "0x01", @@ -69,10 +68,11 @@ func TestTokensApprovedSucceedWithRetries(t *testing.T) { Namespace: "ns1", } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(nil, fmt.Errorf("pop")).Once() - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil).Times(4) - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", approval.Connector, approval.ProtocolID).Return(nil, fmt.Errorf("pop")).Once() - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", approval.Connector, approval.ProtocolID).Return(nil, nil).Times(3) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(nil, fmt.Errorf("pop")).Once() + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil).Once() + em.mam.On("GetTokenPoolByID", em.ctx, pool.ID).Return(pool, nil).Times(3) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, approval.ProtocolID).Return(nil, fmt.Errorf("pop")).Once() + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, approval.ProtocolID).Return(nil, nil).Times(3) em.mth.On("InsertOrGetBlockchainEvent", 
em.ctx, mock.MatchedBy(func(e *core.BlockchainEvent) bool { return e.Namespace == pool.Namespace && e.Name == approval.Event.Name })).Return(nil, nil).Times(3) @@ -103,8 +103,8 @@ func TestPersistApprovalDuplicate(t *testing.T) { Namespace: "ns1", } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", approval.Connector, approval.ProtocolID).Return(&core.TokenApproval{}, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, approval.ProtocolID).Return(&core.TokenApproval{}, nil) valid, err := em.persistTokenApproval(em.ctx, approval) assert.False(t, valid) @@ -122,8 +122,8 @@ func TestPersistApprovalOpFail(t *testing.T) { Namespace: "ns1", } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", approval.Connector, approval.ProtocolID).Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, approval.ProtocolID).Return(nil, nil) em.mth.On("FindOperationInTransaction", em.ctx, approval.TX.ID, core.OpTypeTokenApproval).Return(nil, fmt.Errorf("pop")) valid, err := em.persistTokenApproval(em.ctx, approval) @@ -148,8 +148,8 @@ func TestPersistApprovalBadOp(t *testing.T) { Transaction: fftypes.NewUUID(), } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", approval.Connector, approval.ProtocolID).Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, approval.ProtocolID).Return(nil, nil) em.mth.On("FindOperationInTransaction", em.ctx, approval.TX.ID, core.OpTypeTokenApproval).Return(op, nil) em.mth.On("PersistTransaction", mock.Anything, approval.TX.ID, core.TransactionTypeTokenApproval, "0xffffeeee").Return(false, fmt.Errorf("pop")) @@ -177,8 +177,8 @@ func TestPersistApprovalTxFail(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", approval.Connector, approval.ProtocolID).Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, approval.ProtocolID).Return(nil, nil) em.mth.On("FindOperationInTransaction", em.ctx, approval.TX.ID, core.OpTypeTokenApproval).Return(op, nil) em.mdi.On("GetTokenApprovalByID", em.ctx, "ns1", localID).Return(nil, nil) em.mth.On("PersistTransaction", mock.Anything, approval.TX.ID, core.TransactionTypeTokenApproval, "0xffffeeee").Return(false, fmt.Errorf("pop")) @@ -207,8 +207,8 @@ func TestPersistApprovalGetApprovalFail(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", approval.Connector, approval.ProtocolID).Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, approval.ProtocolID).Return(nil, nil) em.mth.On("FindOperationInTransaction", em.ctx, approval.TX.ID, core.OpTypeTokenApproval).Return(op, nil) em.mdi.On("GetTokenApprovalByID", 
em.ctx, "ns1", localID).Return(nil, fmt.Errorf("pop")) @@ -225,7 +225,7 @@ func TestApprovedBadPool(t *testing.T) { mti := &tokenmocks.Plugin{} approval := newApproval() - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(nil, nil) err := em.TokensApproved(mti, approval) assert.NoError(t, err) @@ -253,8 +253,8 @@ func TestApprovedWithTransactionRegenerateLocalID(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", approval.Connector, approval.ProtocolID).Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, approval.ProtocolID).Return(nil, nil) em.mth.On("FindOperationInTransaction", em.ctx, approval.TX.ID, core.OpTypeTokenApproval).Return(op, nil) em.mth.On("PersistTransaction", mock.Anything, approval.TX.ID, core.TransactionTypeTokenApproval, "0xffffeeee").Return(true, nil) em.mdi.On("GetTokenApprovalByID", em.ctx, "ns1", localID).Return(&core.TokenApproval{}, nil) @@ -294,8 +294,8 @@ func TestApprovedBlockchainEventFail(t *testing.T) { }, } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", approval.Connector, approval.ProtocolID).Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, approval.ProtocolID).Return(nil, nil) em.mth.On("FindOperationInTransaction", em.ctx, approval.TX.ID, core.OpTypeTokenApproval).Return(op, nil) em.mth.On("PersistTransaction", mock.Anything, approval.TX.ID, core.TransactionTypeTokenApproval, "0xffffeeee").Return(true, nil) em.mdi.On("GetTokenApprovalByID", em.ctx, "ns1", localID).Return(&core.TokenApproval{}, nil) @@ -338,8 +338,9 @@ func TestTokensApprovedWithMessageReceived(t *testing.T) { BatchID: fftypes.NewUUID(), } - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil).Times(2) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil).Times(2) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, "123").Return(nil, nil).Times(2) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil).Once() + em.mam.On("GetTokenPoolByID", em.ctx, pool.ID).Return(pool, nil).Once() em.mth.On("InsertOrGetBlockchainEvent", em.ctx, mock.MatchedBy(func(e *core.BlockchainEvent) bool { return e.Namespace == pool.Namespace && e.Name == approval.Event.Name })).Return(nil, nil).Times(2) @@ -390,8 +391,9 @@ func TestTokensApprovedWithMessageSend(t *testing.T) { State: core.MessageStateStaged, } - em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil).Times(2) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil).Times(2) + em.mdi.On("GetTokenApprovalByProtocolID", em.ctx, "ns1", pool.ID, "123").Return(nil, nil).Times(2) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil).Once() + em.mam.On("GetTokenPoolByID", em.ctx, pool.ID).Return(pool, nil).Once() em.mth.On("InsertOrGetBlockchainEvent", em.ctx, mock.MatchedBy(func(e *core.BlockchainEvent) bool { return e.Namespace == pool.Namespace && e.Name == approval.Event.Name })).Return(nil, nil).Times(2) diff 
--git a/internal/events/tokens_transferred.go b/internal/events/tokens_transferred.go index 436d7911c9..74cccc89de 100644 --- a/internal/events/tokens_transferred.go +++ b/internal/events/tokens_transferred.go @@ -61,8 +61,7 @@ func (em *eventManager) loadTransferID(ctx context.Context, tx *fftypes.UUID, tr func (em *eventManager) persistTokenTransfer(ctx context.Context, transfer *tokens.TokenTransfer) (valid bool, err error) { // Check that this is from a known pool - // TODO: should cache this lookup for efficiency - pool, err := em.database.GetTokenPoolByLocator(ctx, em.namespace.Name, transfer.Connector, transfer.PoolLocator) + pool, err := em.getPoolByIDOrLocator(ctx, transfer.Pool, transfer.Connector, transfer.PoolLocator) if err != nil { return false, err } @@ -73,14 +72,6 @@ func (em *eventManager) persistTokenTransfer(ctx context.Context, transfer *toke transfer.Namespace = pool.Namespace transfer.Pool = pool.ID - // Check that transfer has not already been recorded - if existing, err := em.database.GetTokenTransferByProtocolID(ctx, em.namespace.Name, transfer.Connector, transfer.ProtocolID); err != nil { - return false, err - } else if existing != nil { - log.L(ctx).Warnf("Token transfer '%s' has already been recorded - ignoring", transfer.ProtocolID) - return false, nil - } - if transfer.TX.ID == nil { transfer.LocalID = fftypes.NewUUID() } else { @@ -92,6 +83,7 @@ func (em *eventManager) persistTokenTransfer(ctx context.Context, transfer *toke } } + // This is a no-op if we've already persisted this token transfer chainEvent := buildBlockchainEvent(pool.Namespace, nil, transfer.Event, &core.BlockchainTransactionRef{ ID: transfer.TX.ID, Type: transfer.TX.Type, @@ -103,10 +95,18 @@ func (em *eventManager) persistTokenTransfer(ctx context.Context, transfer *toke em.emitBlockchainEventMetric(transfer.Event) transfer.BlockchainEvent = chainEvent.ID - if err := em.database.UpsertTokenTransfer(ctx, &transfer.TokenTransfer); err != nil { + // This is a no-op if we've already persisted this token transfer + existing, err := em.database.InsertOrGetTokenTransfer(ctx, &transfer.TokenTransfer) + + if err != nil { log.L(ctx).Errorf("Failed to record token transfer '%s': %s", transfer.ProtocolID, err) return false, err } + if existing != nil { + log.L(ctx).Debugf("Ignoring duplicate token transfer event %s", existing.ProtocolID) + return false, nil + } + if err := em.database.UpdateTokenBalances(ctx, &transfer.TokenTransfer); err != nil { log.L(ctx).Errorf("Failed to update accounts %s -> %s for token transfer '%s': %s", transfer.From, transfer.To, transfer.ProtocolID, err) return false, err diff --git a/internal/events/tokens_transferred_test.go b/internal/events/tokens_transferred_test.go index 5f8c26f528..489f249875 100644 --- a/internal/events/tokens_transferred_test.go +++ b/internal/events/tokens_transferred_test.go @@ -69,18 +69,17 @@ func TestTokensTransferredSucceedWithRetries(t *testing.T) { Namespace: "ns1", } - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(nil, fmt.Errorf("pop")).Once() - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil).Times(4) - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, fmt.Errorf("pop")).Once() - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil).Times(3) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(nil, fmt.Errorf("pop")).Once() + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", 
"F1").Return(pool, nil).Once() + em.mam.On("GetTokenPoolByID", em.ctx, pool.ID).Return(pool, nil).Times(2) em.mth.On("InsertOrGetBlockchainEvent", em.ctx, mock.MatchedBy(func(e *core.BlockchainEvent) bool { return e.Namespace == pool.Namespace && e.Name == transfer.Event.Name })).Return(nil, nil).Times(3) em.mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(ev *core.Event) bool { return ev.Type == core.EventTypeBlockchainEventReceived && ev.Namespace == pool.Namespace })).Return(nil).Times(3) - em.mdi.On("UpsertTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(fmt.Errorf("pop")).Once() - em.mdi.On("UpsertTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(nil).Times(2) + em.mdi.On("InsertOrGetTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(nil, fmt.Errorf("pop")).Once() + em.mdi.On("InsertOrGetTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(nil, nil).Times(2) em.mdi.On("UpdateTokenBalances", em.ctx, &transfer.TokenTransfer).Return(fmt.Errorf("pop")).Once() em.mdi.On("UpdateTokenBalances", em.ctx, &transfer.TokenTransfer).Return(nil).Once() em.mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(ev *core.Event) bool { @@ -93,27 +92,6 @@ func TestTokensTransferredSucceedWithRetries(t *testing.T) { mti.AssertExpectations(t) } -func TestTokensTransferredIgnoreExisting(t *testing.T) { - em := newTestEventManager(t) - defer em.cleanup(t) - - mti := &tokenmocks.Plugin{} - - transfer := newTransfer() - pool := &core.TokenPool{ - ID: fftypes.NewUUID(), - Namespace: "ns1", - } - - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(&core.TokenTransfer{}, nil) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) - - err := em.TokensTransferred(mti, transfer) - assert.NoError(t, err) - - mti.AssertExpectations(t) -} - func TestPersistTransferOpFail(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) @@ -124,14 +102,12 @@ func TestPersistTransferOpFail(t *testing.T) { Namespace: "ns1", } - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) em.mth.On("FindOperationInTransaction", em.ctx, transfer.TX.ID, core.OpTypeTokenTransfer).Return(nil, fmt.Errorf("pop")) valid, err := em.persistTokenTransfer(em.ctx, transfer) assert.False(t, valid) assert.EqualError(t, err, "pop") - } func TestPersistTransferBadOp(t *testing.T) { @@ -150,8 +126,7 @@ func TestPersistTransferBadOp(t *testing.T) { Transaction: fftypes.NewUUID(), } - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) em.mth.On("FindOperationInTransaction", em.ctx, transfer.TX.ID, core.OpTypeTokenTransfer).Return(op, nil) em.mth.On("PersistTransaction", mock.Anything, transfer.TX.ID, core.TransactionTypeTokenTransfer, "0xffffeeee").Return(false, fmt.Errorf("pop")) @@ -177,8 +152,7 @@ func TestPersistTransferTxFail(t *testing.T) { }, } - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) em.mth.On("FindOperationInTransaction", em.ctx, 
transfer.TX.ID, core.OpTypeTokenTransfer).Return(op, nil) em.mth.On("PersistTransaction", mock.Anything, transfer.TX.ID, core.TransactionTypeTokenTransfer, "0xffffeeee").Return(false, fmt.Errorf("pop")) @@ -206,8 +180,7 @@ func TestPersistTransferGetTransferFail(t *testing.T) { }, } - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) em.mth.On("FindOperationInTransaction", em.ctx, transfer.TX.ID, core.OpTypeTokenTransfer).Return(op, nil) em.mdi.On("GetTokenTransferByID", em.ctx, "ns1", localID).Return(nil, fmt.Errorf("pop")) @@ -235,8 +208,7 @@ func TestPersistTransferBlockchainEventFail(t *testing.T) { }, } - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) em.mth.On("FindOperationInTransaction", em.ctx, transfer.TX.ID, core.OpTypeTokenTransfer).Return(op, nil) em.mth.On("PersistTransaction", mock.Anything, transfer.TX.ID, core.TransactionTypeTokenTransfer, "0xffffeeee").Return(true, nil) em.mdi.On("GetTokenTransferByID", em.ctx, "ns1", localID).Return(nil, nil) @@ -270,8 +242,7 @@ func TestTokensTransferredWithTransactionRegenerateLocalID(t *testing.T) { }, } - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) em.mth.On("FindOperationInTransaction", em.ctx, transfer.TX.ID, core.OpTypeTokenTransfer).Return(op, nil) em.mth.On("PersistTransaction", mock.Anything, transfer.TX.ID, core.TransactionTypeTokenTransfer, "0xffffeeee").Return(true, nil) em.mdi.On("GetTokenTransferByID", em.ctx, "ns1", localID).Return(&core.TokenTransfer{}, nil) @@ -281,7 +252,7 @@ func TestTokensTransferredWithTransactionRegenerateLocalID(t *testing.T) { em.mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(ev *core.Event) bool { return ev.Type == core.EventTypeBlockchainEventReceived && ev.Namespace == pool.Namespace })).Return(nil) - em.mdi.On("UpsertTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(nil) + em.mdi.On("InsertOrGetTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(nil, nil) em.mdi.On("UpdateTokenBalances", em.ctx, &transfer.TokenTransfer).Return(nil) valid, err := em.persistTokenTransfer(em.ctx, transfer) @@ -293,6 +264,47 @@ func TestTokensTransferredWithTransactionRegenerateLocalID(t *testing.T) { mti.AssertExpectations(t) } +func TestTokensTransferredWithExistingTransfer(t *testing.T) { + em := newTestEventManager(t) + defer em.cleanup(t) + + mti := &tokenmocks.Plugin{} + + transfer := newTransfer() + pool := &core.TokenPool{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + } + localID := fftypes.NewUUID() + op := &core.Operation{ + Input: fftypes.JSONObject{ + "localId": localID.String(), + "connector": transfer.Connector, + "pool": pool.ID.String(), + }, + } + + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil) + em.mth.On("InsertOrGetBlockchainEvent", em.ctx, mock.MatchedBy(func(e *core.BlockchainEvent) bool { + return e.Namespace == pool.Namespace && e.Name == transfer.Event.Name + })).Return(nil, nil) + em.mdi.On("InsertEvent", em.ctx, 
mock.MatchedBy(func(ev *core.Event) bool { + return ev.Type == core.EventTypeBlockchainEventReceived && ev.Namespace == pool.Namespace + })).Return(nil) + em.mth.On("FindOperationInTransaction", em.ctx, transfer.TX.ID, core.OpTypeTokenTransfer).Return(op, nil) + em.mth.On("PersistTransaction", mock.Anything, transfer.TX.ID, core.TransactionTypeTokenTransfer, "0xffffeeee").Return(true, nil) + em.mdi.On("GetTokenTransferByID", em.ctx, "ns1", localID).Return(&core.TokenTransfer{}, nil) + em.mdi.On("InsertOrGetTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(&core.TokenTransfer{Type: core.TokenTransferTypeMint}, nil) + + valid, err := em.persistTokenTransfer(em.ctx, transfer) + assert.False(t, valid) + assert.NoError(t, err) + + assert.NotEqual(t, *localID, *transfer.LocalID) + + mti.AssertExpectations(t) +} + func TestTokensTransferredBadPool(t *testing.T) { em := newTestEventManager(t) defer em.cleanup(t) @@ -301,7 +313,7 @@ func TestTokensTransferredBadPool(t *testing.T) { transfer := newTransfer() - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(nil, nil) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(nil, nil) err := em.TokensTransferred(mti, transfer) assert.NoError(t, err) @@ -345,15 +357,15 @@ func TestTokensTransferredWithMessageReceived(t *testing.T) { BatchID: fftypes.NewUUID(), } - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil).Times(2) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil).Times(2) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil).Once() + em.mam.On("GetTokenPoolByID", em.ctx, pool.ID).Return(pool, nil).Once() em.mth.On("InsertOrGetBlockchainEvent", em.ctx, mock.MatchedBy(func(e *core.BlockchainEvent) bool { return e.Namespace == pool.Namespace && e.Name == transfer.Event.Name })).Return(nil, nil).Times(2) em.mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(ev *core.Event) bool { return ev.Type == core.EventTypeBlockchainEventReceived && ev.Namespace == pool.Namespace })).Return(nil).Times(2) - em.mdi.On("UpsertTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(nil).Times(2) + em.mdi.On("InsertOrGetTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(nil, nil).Times(2) em.mdi.On("UpdateTokenBalances", em.ctx, &transfer.TokenTransfer).Return(nil).Times(2) em.mdi.On("GetMessageByID", em.ctx, "ns1", transfer.Message).Return(nil, fmt.Errorf("pop")).Once() em.mdi.On("GetMessageByID", em.ctx, "ns1", transfer.Message).Return(message, nil).Once() @@ -404,15 +416,15 @@ func TestTokensTransferredWithMessageSend(t *testing.T) { State: core.MessageStateStaged, } - em.mdi.On("GetTokenTransferByProtocolID", em.ctx, "ns1", "erc1155", "123").Return(nil, nil).Times(2) - em.mdi.On("GetTokenPoolByLocator", em.ctx, "ns1", "erc1155", "F1").Return(pool, nil).Times(2) + em.mam.On("GetTokenPoolByLocator", em.ctx, "erc1155", "F1").Return(pool, nil).Once() + em.mam.On("GetTokenPoolByID", em.ctx, pool.ID).Return(pool, nil).Once() em.mth.On("InsertOrGetBlockchainEvent", em.ctx, mock.MatchedBy(func(e *core.BlockchainEvent) bool { return e.Namespace == pool.Namespace && e.Name == transfer.Event.Name })).Return(nil, nil).Times(2) em.mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(ev *core.Event) bool { return ev.Type == core.EventTypeBlockchainEventReceived && ev.Namespace == pool.Namespace })).Return(nil).Times(2) - em.mdi.On("UpsertTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(nil).Times(2) + 
em.mdi.On("InsertOrGetTokenTransfer", em.ctx, &transfer.TokenTransfer).Return(nil, nil).Times(2) em.mdi.On("UpdateTokenBalances", em.ctx, &transfer.TokenTransfer).Return(nil).Times(2) em.mdi.On("GetMessageByID", em.ctx, "ns1", mock.Anything).Return(message, nil).Times(2) em.mdi.On("ReplaceMessage", em.ctx, mock.MatchedBy(func(msg *core.Message) bool { diff --git a/internal/events/webhooks/webhooks.go b/internal/events/webhooks/webhooks.go index 73e8451903..9c141681eb 100644 --- a/internal/events/webhooks/webhooks.go +++ b/internal/events/webhooks/webhooks.go @@ -1,4 +1,4 @@ -// Copyright © 2023 Kaleido, Inc. +// Copyright © 2024 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -40,11 +40,12 @@ import ( ) type WebHooks struct { - ctx context.Context - capabilities *events.Capabilities - callbacks callbacks - client *resty.Client - connID string + ctx context.Context + capabilities *events.Capabilities + callbacks callbacks + client *resty.Client + connID string + ffrestyConfig *ffresty.Config } type callbacks struct { @@ -56,11 +57,17 @@ type whRequest struct { r *resty.Request url string method string - body fftypes.JSONObject forceJSON bool replyTx string } +type whPayload struct { + input fftypes.JSONObject + parsedData0 fftypes.JSONObject + data0 *fftypes.JSONAny + body interface{} +} + type whResponse struct { Status int `json:"status"` Headers fftypes.JSONObject `json:"headers"` @@ -72,18 +79,24 @@ func (wh *WebHooks) Name() string { return "webhooks" } func (wh *WebHooks) Init(ctx context.Context, config config.Section) (err error) { connID := fftypes.ShortID() - client, err := ffresty.New(ctx, config) + ffrestyConfig, err := ffresty.GenerateConfig(ctx, config) if err != nil { return err } + + client := ffresty.NewWithConfig(ctx, *ffrestyConfig) + *wh = WebHooks{ - ctx: log.WithLogField(ctx, "webhook", wh.connID), - capabilities: &events.Capabilities{}, + ctx: log.WithLogField(ctx, "webhook", wh.connID), + capabilities: &events.Capabilities{ + BatchDelivery: true, + }, callbacks: callbacks{ handlers: make(map[string]events.Callbacks), }, - client: client, - connID: connID, + client: client, + connID: connID, + ffrestyConfig: ffrestyConfig, } return nil } @@ -91,6 +104,10 @@ func (wh *WebHooks) Init(ctx context.Context, config config.Section) (err error) func (wh *WebHooks) SetHandler(namespace string, handler events.Callbacks) error { wh.callbacks.writeLock.Lock() defer wh.callbacks.writeLock.Unlock() + if handler == nil { + delete(wh.callbacks.handlers, namespace) + return nil + } wh.callbacks.handlers[namespace] = handler // We have a single logical connection, that matches all subscriptions return handler.RegisterConnection(wh.connID, func(sr core.SubscriptionRef) bool { return true }) @@ -100,9 +117,75 @@ func (wh *WebHooks) Capabilities() *events.Capabilities { return wh.capabilities } -func (wh *WebHooks) buildRequest(options fftypes.JSONObject, firstData fftypes.JSONObject) (req *whRequest, err error) { +// firstData parses data0 from the data the first time it's needed, and guarantees a non-nil result +func (p *whPayload) firstData() fftypes.JSONObject { + if p.parsedData0 != nil { + return p.parsedData0 + } + if p.data0 == nil { + p.parsedData0 = fftypes.JSONObject{} + } else { + // Use JSONObjectOk instead of JSONObject + // JSONObject fails for datatypes such as array, string, bool, number etc + var valid bool + p.parsedData0, valid = p.data0.JSONObjectOk() + if !valid { + p.parsedData0 = fftypes.JSONObject{ + "value": p.parsedData0, + } + } + } + return 
p.parsedData0 +} + +func (wh *WebHooks) buildPayload(ctx context.Context, sub *core.Subscription, event *core.CombinedEventDataDelivery) *whPayload { + + withData := sub.Options.WithData != nil && *sub.Options.WithData + options := sub.Options.TransportOptions() + p := &whPayload{ + // Options on how to process the input + input: options.GetObject("input"), + } + + allData := make([]*fftypes.JSONAny, 0, len(event.Data)) + if withData { + for _, d := range event.Data { + if d.Value != nil { + allData = append(allData, d.Value) + if p.data0 == nil { + p.data0 = d.Value + } + } + } + } + + // Choose to sub-select a field to send as the body + var bodyFromFirstData *fftypes.JSONAny + inputBody := p.input.GetString("body") + if inputBody != "" { + bodyFromFirstData = fftypes.JSONAnyPtr(p.firstData().GetObject(inputBody).String()) + } + + switch { + case bodyFromFirstData != nil: + // We might have been told to extract a body from the first data record + p.body = bodyFromFirstData.String() + case len(allData) > 1: + // We've got an array of data to POST + p.body = allData + case len(allData) == 1: + // Just send the first object, forced into an object per the rules in firstData() + p.body = p.firstData() + default: + // Just send the event itself + p.body = event.Event + } + return p +} + +func (wh *WebHooks) buildRequest(ctx context.Context, restyClient *resty.Client, options fftypes.JSONObject, p *whPayload) (req *whRequest, err error) { req = &whRequest{ - r: wh.client.R(). + r: restyClient.R(). SetDoNotParseResponse(true). SetContext(wh.ctx), url: options.GetString("url"), @@ -111,7 +194,7 @@ func (wh *WebHooks) buildRequest(options fftypes.JSONObject, firstData fftypes.J replyTx: options.GetString("replytx"), } if req.url == "" { - return nil, i18n.NewError(wh.ctx, coremsgs.MsgWebhookURLEmpty) + return nil, i18n.NewError(ctx, coremsgs.MsgWebhookURLEmpty) } if req.method == "" { req.method = http.MethodPost @@ -120,7 +203,7 @@ func (wh *WebHooks) buildRequest(options fftypes.JSONObject, firstData fftypes.J for h, v := range headers { s, ok := v.(string) if !ok { - return nil, i18n.NewError(wh.ctx, coremsgs.MsgWebhookInvalidStringMap, "headers", h, v) + return nil, i18n.NewError(ctx, coremsgs.MsgWebhookInvalidStringMap, "headers", h, v) } _ = req.r.SetHeader(h, s) } @@ -132,38 +215,32 @@ func (wh *WebHooks) buildRequest(options fftypes.JSONObject, firstData fftypes.J for q, v := range query { s, ok := v.(string) if !ok { - return nil, i18n.NewError(wh.ctx, coremsgs.MsgWebhookInvalidStringMap, "query", q, v) + return nil, i18n.NewError(ctx, coremsgs.MsgWebhookInvalidStringMap, "query", q, v) } _ = req.r.SetQueryParam(q, s) } - if firstData != nil { - // Options on how to process the input - input := options.GetObject("input") + // p will be nil for a batch delivery + if p != nil { // Dynamic query support from input - inputQuery := input.GetString("query") + inputQuery := p.input.GetString("query") if inputQuery != "" { - iq := firstData.GetObject(inputQuery) + iq := p.firstData().GetObject(inputQuery) for q := range iq { _ = req.r.SetQueryParam(q, iq.GetString(q)) } } // Dynamic header support from input - inputHeaders := input.GetString("headers") + inputHeaders := p.input.GetString("headers") if inputHeaders != "" { - ih := firstData.GetObject(inputHeaders) + ih := p.firstData().GetObject(inputHeaders) for h := range ih { _ = req.r.SetHeader(h, ih.GetString(h)) } } - // Choose to sub-select a field to send as the body - inputBody := input.GetString("body") - if inputBody != "" { - req.body 
= firstData.GetObject(inputBody) - } // Choose to add an additional dynamic path - inputPath := input.GetString("path") + inputPath := p.input.GetString("path") if inputPath != "" { - extraPath := strings.TrimPrefix(firstData.GetString(inputPath), "/") + extraPath := strings.TrimPrefix(p.firstData().GetString(inputPath), "/") if len(extraPath) > 0 { pathSegments := strings.Split(extraPath, "/") for _, ps := range pathSegments { @@ -172,9 +249,9 @@ func (wh *WebHooks) buildRequest(options fftypes.JSONObject, firstData fftypes.J } } // Choose to add an additional dynamic path - inputTxtype := input.GetString("replytx") + inputTxtype := p.input.GetString("replytx") if inputTxtype != "" { - txType := firstData.GetString(inputTxtype) + txType := p.firstData().GetString(inputTxtype) if len(txType) > 0 { req.replyTx = txType if strings.EqualFold(txType, "true") { @@ -186,67 +263,138 @@ func (wh *WebHooks) buildRequest(options fftypes.JSONObject, firstData fftypes.J return req, err } -func (wh *WebHooks) ValidateOptions(options *core.SubscriptionOptions) error { +func (wh *WebHooks) ValidateOptions(ctx context.Context, options *core.SubscriptionOptions) error { if options.WithData == nil { defaultTrue := true options.WithData = &defaultTrue } - _, err := wh.buildRequest(options.TransportOptions(), fftypes.JSONObject{}) - return err -} -func (wh *WebHooks) attemptRequest(sub *core.Subscription, event *core.EventDelivery, data core.DataArray) (req *whRequest, res *whResponse, err error) { - withData := sub.Options.WithData != nil && *sub.Options.WithData - allData := make([]*fftypes.JSONAny, 0, len(data)) - var firstData fftypes.JSONObject - var valid bool - if withData { - for _, d := range data { - if d.Value != nil { - allData = append(allData, d.Value) + newFFRestyConfig := ffresty.Config{} + if wh.ffrestyConfig != nil { + // Take a copy of the webhooks global resty config + newFFRestyConfig = *wh.ffrestyConfig + } + if options.Retry.Enabled { + newFFRestyConfig.Retry = true + if options.Retry.Count > 0 { + newFFRestyConfig.RetryCount = options.Retry.Count + } + + if options.Retry.InitialDelay != "" { + ffd, err := fftypes.ParseDurationString(options.Retry.InitialDelay, time.Millisecond) + if err != nil { + return err } + newFFRestyConfig.RetryInitialDelay = fftypes.FFDuration(time.Duration(ffd)) } - if len(allData) == 0 { - firstData = fftypes.JSONObject{} - } else { - // Use JSONObjectOk instead of JSONObject - // JSONObject fails for datatypes such as array, string, bool, number etc - firstData, valid = allData[0].JSONObjectOk() - if !valid { - firstData = fftypes.JSONObject{ - "value": allData[0], - } + + if options.Retry.MaximumDelay != "" { + ffd, err := fftypes.ParseDurationString(options.Retry.MaximumDelay, time.Millisecond) + if err != nil { + return err } + newFFRestyConfig.RetryMaximumDelay = fftypes.FFDuration(time.Duration(ffd)) } } - req, err = wh.buildRequest(sub.Options.TransportOptions(), firstData) + if options.HTTPOptions.HTTPMaxIdleConns > 0 { + newFFRestyConfig.HTTPMaxIdleConns = options.HTTPOptions.HTTPMaxIdleConns + } + + if options.HTTPOptions.HTTPRequestTimeout != "" { + ffd, err := fftypes.ParseDurationString(options.HTTPOptions.HTTPRequestTimeout, time.Millisecond) + if err != nil { + return err + } + newFFRestyConfig.HTTPRequestTimeout = fftypes.FFDuration(time.Duration(ffd)) + } + + if options.HTTPOptions.HTTPIdleConnTimeout != "" { + ffd, err := fftypes.ParseDurationString(options.HTTPOptions.HTTPIdleConnTimeout, time.Millisecond) + if err != nil { + return err + } 
+ newFFRestyConfig.HTTPIdleConnTimeout = fftypes.FFDuration(time.Duration(ffd)) + } + + if options.HTTPOptions.HTTPExpectContinueTimeout != "" { + ffd, err := fftypes.ParseDurationString(options.HTTPOptions.HTTPExpectContinueTimeout, time.Millisecond) + if err != nil { + return err + } + newFFRestyConfig.HTTPExpectContinueTimeout = fftypes.FFDuration(time.Duration(ffd)) + } + + if options.HTTPOptions.HTTPConnectionTimeout != "" { + ffd, err := fftypes.ParseDurationString(options.HTTPOptions.HTTPConnectionTimeout, time.Millisecond) + if err != nil { + return err + } + newFFRestyConfig.HTTPConnectionTimeout = fftypes.FFDuration(time.Duration(ffd)) + } + + if options.HTTPOptions.HTTPTLSHandshakeTimeout != "" { + ffd, err := fftypes.ParseDurationString(options.HTTPOptions.HTTPTLSHandshakeTimeout, time.Millisecond) + if err != nil { + return err + } + newFFRestyConfig.HTTPTLSHandshakeTimeout = fftypes.FFDuration(time.Duration(ffd)) + } + + if options.HTTPOptions.HTTPProxyURL != nil { + newFFRestyConfig.ProxyURL = *options.HTTPOptions.HTTPProxyURL + } + + if options.TLSConfig != nil { + newFFRestyConfig.TLSClientConfig = options.TLSConfig + } + + // NOTE: this is the plugin context, as the context passed through can be terminated as part of a + // API call or anything else and we want to use this client later on!! + // So these clients should live as long as the plugin exists + options.RestyClient = ffresty.NewWithConfig(wh.ctx, newFFRestyConfig) + + _, err := wh.buildRequest(ctx, options.RestyClient, options.TransportOptions(), nil) + return err +} + +func (wh *WebHooks) attemptRequest(ctx context.Context, sub *core.Subscription, events []*core.CombinedEventDataDelivery, batch bool) (req *whRequest, res *whResponse, err error) { + + var payloadForBuildingRequest *whPayload // only set for a single event delivery + var requestBody interface{} + if len(events) == 1 && !batch { + payloadForBuildingRequest = wh.buildPayload(ctx, sub, events[0]) + // Payload for POST/PATCH/PUT is what is calculated for a single event in buildPayload + requestBody = payloadForBuildingRequest.body + } else { + batchBody := make([]interface{}, len(events)) + for i, event := range events { + // We only use the body itself from the whPayload - then discard it. 
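For reference, a minimal sketch of how a caller might populate the per-subscription retry and HTTP options that ValidateOptions (above) now folds into a dedicated ffresty client on options.RestyClient. The field names mirror this change and its unit tests (TestValidateOptionsExtraFields); the URL, proxy address, and timeout values are illustrative only, and the internal webhooks import is shown purely for illustration within the firefly module.

// Editorial sketch (not part of this change set): populating the new per-subscription
// retry/HTTP options that ValidateOptions turns into a dedicated ffresty client.
package example

import (
	"context"
	"crypto/tls"

	"github.com/hyperledger/firefly/internal/events/webhooks"
	"github.com/hyperledger/firefly/pkg/core"
)

func buildWebhookSubOptions(ctx context.Context, wh *webhooks.WebHooks) (*core.SubscriptionOptions, error) {
	proxyURL := "http://myproxy.example.com:8888" // illustrative proxy address

	opts := &core.SubscriptionOptions{}
	opts.TransportOptions()["url"] = "https://receiver.example.com/hook" // illustrative target

	// Per-subscription retry policy, parsed from duration strings by ValidateOptions
	opts.Retry = core.WebhookRetryOptions{
		Enabled:      true,
		Count:        2,
		InitialDelay: "1s",
		MaximumDelay: "30s",
	}

	// Per-subscription HTTP transport tuning, also parsed from duration strings
	opts.HTTPOptions = core.WebhookHTTPOptions{
		HTTPMaxIdleConns:        2,
		HTTPRequestTimeout:      "2s",
		HTTPIdleConnTimeout:     "2s",
		HTTPConnectionTimeout:   "2s",
		HTTPTLSHandshakeTimeout: "2s",
		HTTPProxyURL:            &proxyURL,
	}
	opts.TLSConfig = &tls.Config{} // optional custom TLS config for the webhook client

	// ValidateOptions builds opts.RestyClient from these settings, layered on the
	// plugin's global ffresty config, so every delivery on this subscription reuses it
	if err := wh.ValidateOptions(ctx, opts); err != nil {
		return nil, err
	}
	return opts, nil
}

As in the code above, the duration strings are parsed with fftypes.ParseDurationString using time.Millisecond as the default unit, and the resulting client is stored on the subscription options so it outlives any single API call.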
+ p := wh.buildPayload(ctx, sub, event) + batchBody[i] = p.body + } + // Payload for POST/PATCH/PUT is the array of outputs calculated for a each event in buildPayload + requestBody = batchBody + } + + client := wh.client + if sub.Options.RestyClient != nil { + client = sub.Options.RestyClient + } + + req, err = wh.buildRequest(ctx, client, sub.Options.TransportOptions(), payloadForBuildingRequest) if err != nil { return nil, nil, err } if req.method == http.MethodPost || req.method == http.MethodPatch || req.method == http.MethodPut { - switch { - case req.body != nil: - // We might have been told to extract a body from the first data record - req.r.SetBody(req.body) - case len(allData) > 1: - // We've got an array of data to POST - req.r.SetBody(allData) - case len(allData) == 1: - // Just send the first object directly - req.r.SetBody(firstData) - default: - // Just send the event itself - req.r.SetBody(event) - - } + req.r.SetBody(requestBody) } - log.L(wh.ctx).Debugf("Webhook-> %s %s event %s on subscription %s", req.method, req.url, event.ID, sub.ID) + // log.L(wh.ctx).Debugf("Webhook-> %s %s event %s on subscription %s", req.method, req.url, event.ID, sub.ID) resp, err := req.r.Execute(req.method, req.url) if err != nil { - log.L(wh.ctx).Errorf("Webhook<- %s %s event %s on subscription %s failed: %s", req.method, req.url, event.ID, sub.ID, err) + // log.L(ctx).Errorf("Webhook<- %s %s event %s on subscription %s failed: %s", req.method, req.url, event.ID, sub.ID, err) return nil, nil, err } defer func() { _ = resp.RawBody().Close() }() @@ -255,13 +403,13 @@ func (wh *WebHooks) attemptRequest(sub *core.Subscription, event *core.EventDeli Status: resp.StatusCode(), Headers: fftypes.JSONObject{}, } - log.L(wh.ctx).Infof("Webhook<- %s %s event %s on subscription %s returned %d", req.method, req.url, event.ID, sub.ID, res.Status) + // log.L(wh.ctx).Infof("Webhook<- %s %s event %s on subscription %s returned %d", req.method, req.url, event.ID, sub.ID, res.Status) header := resp.Header() for h := range header { res.Headers[h] = header.Get(h) } contentType := header.Get("Content-Type") - log.L(wh.ctx).Debugf("Response content-type '%s' forceJSON=%t", contentType, req.forceJSON) + log.L(ctx).Debugf("Response content-type '%s' forceJSON=%t", contentType, req.forceJSON) if req.forceJSON { contentType = "application/json" } @@ -270,7 +418,7 @@ func (wh *WebHooks) attemptRequest(sub *core.Subscription, event *core.EventDeli var resData interface{} err = json.NewDecoder(resp.RawBody()).Decode(&resData) if err != nil { - return nil, nil, i18n.WrapError(wh.ctx, err, coremsgs.MsgWebhooksReplyBadJSON) + return nil, nil, i18n.WrapError(ctx, err, coremsgs.MsgWebhooksReplyBadJSON) } b, _ := json.Marshal(&resData) // we know we can re-marshal It res.Body = fftypes.JSONAnyPtrBytes(b) @@ -288,8 +436,8 @@ func (wh *WebHooks) attemptRequest(sub *core.Subscription, event *core.EventDeli return req, res, nil } -func (wh *WebHooks) doDelivery(connID string, reply bool, sub *core.Subscription, event *core.EventDelivery, data core.DataArray, fastAck bool) { - req, res, gwErr := wh.attemptRequest(sub, event, data) +func (wh *WebHooks) doDelivery(ctx context.Context, connID string, reply bool, sub *core.Subscription, events []*core.CombinedEventDataDelivery, fastAck, batched bool) { + req, res, gwErr := wh.attemptRequest(ctx, sub, events, batched) if gwErr != nil { // Generate a bad-gateway error response - we always want to send something back, // rather than just causing timeouts @@ -308,47 +456,52 @@ func (wh 
*WebHooks) doDelivery(connID string, reply bool, sub *core.Subscription b, _ := json.Marshal(&res) log.L(wh.ctx).Tracef("Webhook response: %s", string(b)) - // Emit the response - if reply && event.Message != nil { - txType := fftypes.FFEnum(strings.ToLower(sub.Options.TransportOptions().GetString("replytx"))) - if req != nil && req.replyTx != "" { - txType = fftypes.FFEnum(strings.ToLower(req.replyTx)) - } - if cb, ok := wh.callbacks.handlers[sub.Namespace]; ok { - log.L(wh.ctx).Debugf("Sending reply message for %s CID=%s", event.ID, event.Message.Header.ID) - cb.DeliveryResponse(connID, &core.EventDeliveryResponse{ - ID: event.ID, - Rejected: false, - Subscription: event.Subscription, - Reply: &core.MessageInOut{ - Message: core.Message{ - Header: core.MessageHeader{ - CID: event.Message.Header.ID, - Group: event.Message.Header.Group, - Type: event.Message.Header.Type, - Topics: event.Message.Header.Topics, - Tag: sub.Options.TransportOptions().GetString("replytag"), - TxType: txType, + // For each event emit a response + for _, combinedEvent := range events { + event := combinedEvent.Event + // Emit the response + if reply && event.Message != nil { + txType := fftypes.FFEnum(strings.ToLower(sub.Options.TransportOptions().GetString("replytx"))) + if req != nil && req.replyTx != "" { + txType = fftypes.FFEnum(strings.ToLower(req.replyTx)) + } + if cb, ok := wh.callbacks.handlers[sub.Namespace]; ok { + log.L(wh.ctx).Debugf("Sending reply message for %s CID=%s", event.ID, event.Message.Header.ID) + cb.DeliveryResponse(connID, &core.EventDeliveryResponse{ + ID: event.ID, + Rejected: false, + Subscription: event.Subscription, + Reply: &core.MessageInOut{ + Message: core.Message{ + Header: core.MessageHeader{ + CID: event.Message.Header.ID, + Group: event.Message.Header.Group, + Type: event.Message.Header.Type, + Topics: event.Message.Header.Topics, + Tag: sub.Options.TransportOptions().GetString("replytag"), + TxType: txType, + }, + }, + InlineData: core.InlineData{ + {Value: fftypes.JSONAnyPtrBytes(b)}, }, }, - InlineData: core.InlineData{ - {Value: fftypes.JSONAnyPtrBytes(b)}, - }, - }, - }) - } - } else if !fastAck { - if cb, ok := wh.callbacks.handlers[sub.Namespace]; ok { - cb.DeliveryResponse(connID, &core.EventDeliveryResponse{ - ID: event.ID, - Rejected: false, - Subscription: event.Subscription, - }) + }) + } + } else if !fastAck { + if cb, ok := wh.callbacks.handlers[sub.Namespace]; ok { + cb.DeliveryResponse(connID, &core.EventDeliveryResponse{ + ID: event.ID, + Rejected: false, + Subscription: event.Subscription, + }) + } } } + } -func (wh *WebHooks) DeliveryRequest(connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error { +func (wh *WebHooks) DeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error { reply := sub.Options.TransportOptions().GetBool("reply") if reply && event.Message != nil && event.Message.Header.CID != nil { // We cowardly refuse to dispatch a message that is itself a reply, as it's hard for users to @@ -376,11 +529,63 @@ func (wh *WebHooks) DeliveryRequest(connID string, sub *core.Subscription, event Subscription: event.Subscription, }) } - go wh.doDelivery(connID, reply, sub, event, data, true) + go wh.doDelivery(ctx, connID, reply, sub, []*core.CombinedEventDataDelivery{{Event: event, Data: data}}, true, false) + return nil + } + + // NOTE: We could check here for batching and accumulate but we can't return because this causes the offset to jump... 
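To summarize the request-body rules implemented by buildPayload and attemptRequest above, here is a standalone restatement using plain Go types rather than the FireFly fftypes wrappers (a simplified sketch, not the implementation): an explicit input.body sub-selection wins, then an array when there are multiple data items, then the single first item, then the event itself; for a batch delivery, the per-event bodies are collected into one JSON array.

// Editorial sketch: simplified restatement of the webhook body-selection rules.
package example

// chooseSingleEventBody mirrors whPayload.body for one event (simplified).
func chooseSingleEventBody(event interface{}, data []map[string]interface{}, inputBodyField string) interface{} {
	switch {
	case inputBodyField != "" && len(data) > 0:
		// Sub-select a field of the first data item as the body
		return data[0][inputBodyField]
	case len(data) > 1:
		// POST the whole array of data items
		return data
	case len(data) == 1:
		// POST just the first data item
		return data[0]
	default:
		// No data: POST the event itself
		return event
	}
}

// chooseBatchBody mirrors the batched path: one array entry per event.
func chooseBatchBody(events []interface{}, perEventData [][]map[string]interface{}, inputBodyField string) []interface{} {
	bodies := make([]interface{}, len(events))
	for i, ev := range events {
		bodies[i] = chooseSingleEventBody(ev, perEventData[i], inputBodyField)
	}
	return bodies
}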
+ + // TODO we don't look at the error here? + wh.doDelivery(ctx, connID, reply, sub, []*core.CombinedEventDataDelivery{{Event: event, Data: data}}, false, false) + return nil +} + +func (wh *WebHooks) BatchDeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, events []*core.CombinedEventDataDelivery) error { + reply := sub.Options.TransportOptions().GetBool("reply") + if reply { + nonReplyEvents := []*core.CombinedEventDataDelivery{} + for _, combinedEvent := range events { + event := combinedEvent.Event + // We cowardly refuse to dispatch a message that is itself a reply, as it's hard for users to + // avoid loops - and there's no way for us to detect here if a user has configured correctly + // to avoid a loop. + if event.Message != nil && event.Message.Header.CID != nil { + log.L(wh.ctx).Debugf("Webhook subscription with reply enabled called with reply event '%s'", event.ID) + if cb, ok := wh.callbacks.handlers[sub.Namespace]; ok { + cb.DeliveryResponse(connID, &core.EventDeliveryResponse{ + ID: event.ID, + Rejected: false, + Subscription: event.Subscription, + }) + } + continue + } + + nonReplyEvents = append(nonReplyEvents, combinedEvent) + } + // Override the events to send without the reply events + events = nonReplyEvents + } + + // // In fastack mode we drive calls in parallel to the backend, immediately acknowledging the event + // NOTE: We cannot use this with reply mode, as when we're sending a reply the `DeliveryResponse` + // callback must include the reply in-line. + if !reply && sub.Options.TransportOptions().GetBool("fastack") { + for _, combinedEvent := range events { + event := combinedEvent.Event + if cb, ok := wh.callbacks.handlers[sub.Namespace]; ok { + cb.DeliveryResponse(connID, &core.EventDeliveryResponse{ + ID: event.ID, + Rejected: false, + Subscription: event.Subscription, + }) + } + } + go wh.doDelivery(ctx, connID, reply, sub, events, true, true) return nil } - wh.doDelivery(connID, reply, sub, event, data, false) + wh.doDelivery(ctx, connID, reply, sub, events, false, true) return nil } diff --git a/internal/events/webhooks/webhooks_test.go b/internal/events/webhooks/webhooks_test.go index 76f5c5c56f..ec96cd2f5d 100644 --- a/internal/events/webhooks/webhooks_test.go +++ b/internal/events/webhooks/webhooks_test.go @@ -18,15 +18,27 @@ package webhooks import ( "context" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" "encoding/json" + "encoding/pem" "fmt" + "log" + "math/big" + "net" "net/http" "net/http/httptest" + "os" "testing" "time" "github.com/gorilla/mux" "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/ffresty" + "github.com/hyperledger/firefly-common/pkg/fftls" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly/internal/coreconfig" "github.com/hyperledger/firefly/mocks/eventsmocks" @@ -55,6 +67,20 @@ func newTestWebHooks(t *testing.T) (wh *WebHooks, cancel func()) { return wh, cancelCtx } +func TestInitBadTLS(t *testing.T) { + coreconfig.Reset() + + wh := &WebHooks{} + ctx := context.Background() + svrConfig := config.RootSection("ut.webhooks") + wh.InitConfig(svrConfig) + tlsConfig := svrConfig.SubSection("tls") + tlsConfig.Set(fftls.HTTPConfTLSEnabled, true) + tlsConfig.Set(fftls.HTTPConfTLSCAFile, "BADCA") + err := wh.Init(ctx, svrConfig) + assert.Regexp(t, "FF00153", err) +} + func TestValidateOptionsWithDataFalse(t *testing.T) { wh, cancel := newTestWebHooks(t) defer cancel() @@ -66,7 +92,7 @@ func 
TestValidateOptionsWithDataFalse(t *testing.T) { }, } opts.TransportOptions()["url"] = "/anything" - err := wh.ValidateOptions(opts) + err := wh.ValidateOptions(wh.ctx, opts) assert.NoError(t, err) assert.False(t, *opts.WithData) } @@ -77,9 +103,12 @@ func TestValidateOptionsWithDataDefaulTrue(t *testing.T) { opts := &core.SubscriptionOptions{} opts.TransportOptions()["url"] = "/anything" - err := wh.ValidateOptions(opts) + err := wh.ValidateOptions(wh.ctx, opts) assert.NoError(t, err) assert.True(t, *opts.WithData) + + wh.SetHandler("ns1", nil) + assert.Empty(t, wh.callbacks.handlers) } func TestValidateOptionsBadURL(t *testing.T) { @@ -88,7 +117,7 @@ func TestValidateOptionsBadURL(t *testing.T) { opts := &core.SubscriptionOptions{} opts.TransportOptions() - err := wh.ValidateOptions(opts) + err := wh.ValidateOptions(wh.ctx, opts) assert.Regexp(t, "FF10242", err) } @@ -102,7 +131,7 @@ func TestValidateOptionsBadHeaders(t *testing.T) { opts.TransportOptions()["headers"] = fftypes.JSONObject{ "bad": map[bool]bool{false: true}, } - err := wh.ValidateOptions(opts) + err := wh.ValidateOptions(wh.ctx, opts) assert.Regexp(t, "FF10243.*headers", err) } @@ -116,10 +145,153 @@ func TestValidateOptionsBadQuery(t *testing.T) { opts.TransportOptions()["query"] = fftypes.JSONObject{ "bad": map[bool]bool{false: true}, } - err := wh.ValidateOptions(opts) + err := wh.ValidateOptions(wh.ctx, opts) assert.Regexp(t, "FF10243.*query", err) } +func TestValidateOptionsBadInitialDelayDuration(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + opts := &core.SubscriptionOptions{} + opts.Retry = core.WebhookRetryOptions{ + Enabled: true, + InitialDelay: "badinitialdelay", + } + err := wh.ValidateOptions(wh.ctx, opts) + assert.Regexp(t, "FF00137", err) +} + +func TestValidateOptionsBadMaxDelayDuration(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + opts := &core.SubscriptionOptions{} + opts.Retry = core.WebhookRetryOptions{ + Enabled: true, + MaximumDelay: "badmaxdelay", + } + err := wh.ValidateOptions(wh.ctx, opts) + assert.Regexp(t, "FF00137", err) +} + +func TestValidateOptionsBadHTTPRequestTimeout(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + opts := &core.SubscriptionOptions{} + + opts.HTTPOptions = core.WebhookHTTPOptions{ + HTTPRequestTimeout: "badrequestimeout", + } + err := wh.ValidateOptions(wh.ctx, opts) + assert.Regexp(t, "FF00137", err) +} + +func TestValidateOptionsBadHTTPTLSHandshakeTimeout(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + opts := &core.SubscriptionOptions{} + + opts.HTTPOptions = core.WebhookHTTPOptions{ + HTTPTLSHandshakeTimeout: "badtimeout", + } + err := wh.ValidateOptions(wh.ctx, opts) + assert.Regexp(t, "FF00137", err) +} +func TestValidateOptionsBadHTTPIdleConnTimeout(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + opts := &core.SubscriptionOptions{} + + opts.HTTPOptions = core.WebhookHTTPOptions{ + HTTPIdleConnTimeout: "badtimeout", + } + err := wh.ValidateOptions(wh.ctx, opts) + assert.Regexp(t, "FF00137", err) +} +func TestValidateOptionsBadHTTPConnectionTimeout(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + opts := &core.SubscriptionOptions{} + + opts.HTTPOptions = core.WebhookHTTPOptions{ + HTTPConnectionTimeout: "badtimeout", + } + err := wh.ValidateOptions(wh.ctx, opts) + assert.Regexp(t, "FF00137", err) +} +func TestValidateOptionsBadHTTPExpectedContinueTimeout(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + 
+ opts := &core.SubscriptionOptions{} + + opts.HTTPOptions = core.WebhookHTTPOptions{ + HTTPExpectContinueTimeout: "badtimeout", + } + err := wh.ValidateOptions(wh.ctx, opts) + assert.Regexp(t, "FF00137", err) +} + +func TestValidateOptionsExtraFields(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + wh.ffrestyConfig = &ffresty.Config{ + URL: "test-url", + } + + opts := &core.SubscriptionOptions{} + opts.TransportOptions()["url"] = "/anything" + + opts.Retry = core.WebhookRetryOptions{ + Enabled: true, + Count: 2, + InitialDelay: "1s", + MaximumDelay: "2s", + } + + proxyURL := "http://myproxy.example.com:8888" + opts.HTTPOptions = core.WebhookHTTPOptions{ + HTTPMaxIdleConns: 2, + HTTPTLSHandshakeTimeout: "2s", + HTTPRequestTimeout: "2s", + HTTPIdleConnTimeout: "2s", + HTTPConnectionTimeout: "2s", + HTTPExpectContinueTimeout: "2s", + HTTPProxyURL: &proxyURL, + } + + opts.TLSConfig = &tls.Config{} + + err := wh.ValidateOptions(wh.ctx, opts) + assert.NoError(t, err) + + assert.Equal(t, opts.RestyClient.RetryCount, 2) + assert.Equal(t, opts.RestyClient.RetryMaxWaitTime, 2*time.Second) + assert.Equal(t, opts.RestyClient.RetryWaitTime, 1*time.Second) + + expectedDuration := 2 * time.Second + assert.Equal(t, opts.RestyClient.GetClient().Timeout, expectedDuration) + + transport, ok := opts.RestyClient.GetClient().Transport.(*http.Transport) + assert.True(t, ok) + assert.Equal(t, transport.IdleConnTimeout, expectedDuration) + assert.Equal(t, transport.ExpectContinueTimeout, expectedDuration) + assert.Equal(t, transport.TLSHandshakeTimeout, expectedDuration) + assert.Equal(t, transport.MaxIdleConns, 2) + assert.NotNil(t, transport.TLSClientConfig) + + req := httptest.NewRequest(http.MethodGet, "http://testany.example.com", nil) + u, err := transport.Proxy(req) + assert.NoError(t, err) + assert.Equal(t, "http://myproxy.example.com:8888", u.String()) +} + func TestRequestWithBodyReplyEndToEnd(t *testing.T) { wh, cancel := newTestWebHooks(t) defer cancel() @@ -224,10 +396,200 @@ func TestRequestWithBodyReplyEndToEnd(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{data}) + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{data}) + assert.NoError(t, err) + + mcb.AssertExpectations(t) +} + +func TestRequestWithBodyReplyEndToEndWithTLS(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + r := mux.NewRouter() + r.HandleFunc("/myapi/my/sub/path?escape_query", func(res http.ResponseWriter, req *http.Request) { + assert.Equal(t, "myheaderval", req.Header.Get("My-Header")) + assert.Equal(t, "dynamicheaderval", req.Header.Get("Dynamic-Header")) + assert.Equal(t, "myqueryval", req.URL.Query().Get("my-query")) + assert.Equal(t, "dynamicqueryval", req.URL.Query().Get("dynamic-query")) + var body fftypes.JSONObject + err := json.NewDecoder(req.Body).Decode(&body) + assert.NoError(t, err) + assert.Equal(t, "inputvalue", body.GetString("inputfield")) + res.Header().Set("my-reply-header", "myheaderval2") + res.WriteHeader(200) + res.Write([]byte(`{ + "replyfield": "replyvalue" + }`)) + }).Methods(http.MethodPut) + + // Create an X509 certificate pair + privatekey, _ := rsa.GenerateKey(rand.Reader, 2048) + publickey := &privatekey.PublicKey + var privateKeyBytes []byte = x509.MarshalPKCS1PrivateKey(privatekey) + privateKeyFile, _ := os.CreateTemp("", "key.pem") + defer os.Remove(privateKeyFile.Name()) + privateKeyBlock := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: privateKeyBytes} + 
pem.Encode(privateKeyFile, privateKeyBlock) + serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) + x509Template := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Unit Tests"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(1000 * time.Second), + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)}, + } + derBytes, err := x509.CreateCertificate(rand.Reader, x509Template, x509Template, publickey, privatekey) + assert.NoError(t, err) + publicKeyFile, _ := os.CreateTemp("", "cert.pem") + defer os.Remove(publicKeyFile.Name()) + pem.Encode(publicKeyFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + + caCert, err := os.ReadFile(publicKeyFile.Name()) + if err != nil { + log.Fatal(err) + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + // Create the TLS Config with the CA pool and enable Client certificate validation + tlsConfig := &tls.Config{ + ClientCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + } + tlsConfig.BuildNameToCertificate() + + // Create a Server instance to listen on port 8443 with the TLS config + server := &http.Server{ + Addr: "127.0.0.1:8443", + TLSConfig: tlsConfig, + } + + ctx, cancelCtx := context.WithCancel(context.Background()) + go func() { + select { + case <-ctx.Done(): + shutdownContext, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + if err := server.Shutdown(shutdownContext); err != nil { + return + } + } + }() + + server.Handler = r + go server.ListenAndServeTLS(publicKeyFile.Name(), privateKeyFile.Name()) + + // Build a TLS config for the client and set on the subscription object + cert, err := tls.LoadX509KeyPair(publicKeyFile.Name(), privateKeyFile.Name()) + assert.NoError(t, err) + clientTLSConfig := &tls.Config{ + RootCAs: caCertPool, + Certificates: []tls.Certificate{cert}, + } + + yes := true + dataID := fftypes.NewUUID() + msgID := fftypes.NewUUID() + groupHash := fftypes.NewRandB32() + + client := ffresty.NewWithConfig(ctx, ffresty.Config{ + HTTPConfig: ffresty.HTTPConfig{ + TLSClientConfig: clientTLSConfig, + }, + }) + sub := &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Namespace: "ns1", + }, + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + WithData: &yes, + }, + WebhookSubOptions: core.WebhookSubOptions{ + RestyClient: client, + }, + }, + } + to := sub.Options.TransportOptions() + to["reply"] = true + to["json"] = true + to["method"] = "PUT" + to["url"] = fmt.Sprintf("https://%s/myapi/", server.Addr) + to["headers"] = map[string]interface{}{ + "my-header": "myheaderval", + } + to["query"] = map[string]interface{}{ + "my-query": "myqueryval", + } + to["input"] = map[string]interface{}{ + "query": "in_query", + "headers": "in_headers", + "body": "in_body", + "path": "in_path", + "replytx": "in_replytx", + } + event := &core.EventDelivery{ + EnrichedEvent: core.EnrichedEvent{ + Event: core.Event{ + ID: fftypes.NewUUID(), + }, + Message: &core.Message{ + Header: core.MessageHeader{ + ID: msgID, + Group: groupHash, + Type: core.MessageTypePrivate, + }, + Data: core.DataRefs{ + {ID: dataID}, + }, + }, + }, + Subscription: core.SubscriptionRef{ + ID: sub.ID, + }, + } + data := &core.Data{ + ID: dataID, + Value: fftypes.JSONAnyPtr(`{ + "in_body": { + "inputfield": 
"inputvalue" + }, + "in_query": { + "dynamic-query": "dynamicqueryval" + }, + "in_headers": { + "dynamic-header": "dynamicheaderval" + }, + "in_path": "/my/sub/path?escape_query", + "in_replytx": true + }`), + } + + mcb := wh.callbacks.handlers["ns1"].(*eventsmocks.Callbacks) + mcb.On("DeliveryResponse", mock.Anything, mock.MatchedBy(func(response *core.EventDeliveryResponse) bool { + assert.Equal(t, *msgID, *response.Reply.Message.Header.CID) + assert.Equal(t, *groupHash, *response.Reply.Message.Header.Group) + assert.Equal(t, core.MessageTypePrivate, response.Reply.Message.Header.Type) + assert.Equal(t, core.TransactionTypeBatchPin, response.Reply.Message.Header.TxType) + assert.Equal(t, "myheaderval2", response.Reply.InlineData[0].Value.JSONObject().GetObject("headers").GetString("My-Reply-Header")) + assert.Equal(t, "replyvalue", response.Reply.InlineData[0].Value.JSONObject().GetObject("body").GetString("replyfield")) + assert.Equal(t, float64(200), response.Reply.InlineData[0].Value.JSONObject()["status"]) + return true + })).Return(nil) + + err = wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{data}) assert.NoError(t, err) mcb.AssertExpectations(t) + + cancelCtx() + } func TestRequestWithEmptyStringBodyReplyEndToEnd(t *testing.T) { @@ -334,7 +696,7 @@ func TestRequestWithEmptyStringBodyReplyEndToEnd(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{data}) + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{data}) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -401,7 +763,7 @@ func TestRequestNoBodyNoReply(t *testing.T) { return !response.Rejected })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{data}) + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{data}) assert.NoError(t, err) assert.True(t, called) @@ -465,7 +827,7 @@ func TestRequestReplyEmptyData(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{}) + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{}) assert.NoError(t, err) assert.True(t, called) @@ -531,7 +893,7 @@ func TestRequestReplyOneData(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{{ID: dataID, Value: fftypes.JSONAnyPtr("foo")}}) + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{{ID: dataID, Value: fftypes.JSONAnyPtr("foo")}}) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -584,7 +946,7 @@ func TestRequestReplyBadJSON(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{}) + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{}) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -653,7 +1015,7 @@ func TestRequestReplyDataArrayBadStatusB64(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{ + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) @@ -707,7 +1069,7 @@ func TestRequestReplyDataArrayError(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{ + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{ {ID: 
fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) @@ -716,7 +1078,7 @@ func TestRequestReplyDataArrayError(t *testing.T) { mcb.AssertExpectations(t) } -func TestWebhookFailFastAsk(t *testing.T) { +func TestWebhookFailFastAck(t *testing.T) { wh, cancel := newTestWebHooks(t) defer cancel() @@ -761,13 +1123,13 @@ func TestWebhookFailFastAsk(t *testing.T) { }) // Drive two deliveries, waiting for them both to ack (noting both will fail) - err := wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{ + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) assert.NoError(t, err) - err = wh.DeliveryRequest(mock.Anything, sub, event, core.DataArray{ + err = wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, core.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) @@ -778,6 +1140,68 @@ func TestWebhookFailFastAsk(t *testing.T) { mcb.AssertExpectations(t) } +func TestWebhookFailFastAckBatch(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + msgID := fftypes.NewUUID() + r := mux.NewRouter() + server := httptest.NewServer(r) + server.Close() + + sub := &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Namespace: "ns1", + }, + } + sub.Options.TransportOptions()["fastack"] = true + event := &core.EventDelivery{ + EnrichedEvent: core.EnrichedEvent{ + Event: core.Event{ + ID: fftypes.NewUUID(), + }, + Message: &core.Message{ + Header: core.MessageHeader{ + ID: msgID, + Type: core.MessageTypeBroadcast, + }, + }, + }, + Subscription: core.SubscriptionRef{ + ID: sub.ID, + }, + } + + count := 0 + waiter := make(chan struct{}) + mcb := wh.callbacks.handlers["ns1"].(*eventsmocks.Callbacks) + mcb.On("DeliveryResponse", mock.Anything, mock.Anything). + Return(nil). 
+ Run(func(a mock.Arguments) { + count++ + if count == 2 { + close(waiter) + } + }) + + // Drive two deliveries, waiting for them both to ack (noting both will fail) + err := wh.BatchDeliveryRequest(wh.ctx, mock.Anything, sub, []*core.CombinedEventDataDelivery{ + {Event: event, Data: core.DataArray{ + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, + }}, + {Event: event, Data: core.DataArray{ + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, + }}, + }) + assert.NoError(t, err) + + <-waiter + + mcb.AssertExpectations(t) +} + func TestDeliveryRequestNilMessage(t *testing.T) { wh, cancel := newTestWebHooks(t) defer cancel() @@ -807,7 +1231,7 @@ func TestDeliveryRequestNilMessage(t *testing.T) { }, } - err := wh.DeliveryRequest(mock.Anything, sub, event, nil) + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, nil) assert.NoError(t, err) mcb.AssertExpectations(t) } @@ -851,7 +1275,52 @@ func TestDeliveryRequestReplyToReply(t *testing.T) { return !response.Rejected // should be accepted as a no-op so we can move on to other events })) - err := wh.DeliveryRequest(mock.Anything, sub, event, nil) + err := wh.DeliveryRequest(wh.ctx, mock.Anything, sub, event, nil) + assert.NoError(t, err) + + mcb.AssertExpectations(t) +} + +func TestBatchDeliveryRequestReplyToReply(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + yes := true + sub := &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Namespace: "ns1", + }, + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + WithData: &yes, + }, + }, + } + sub.Options.TransportOptions()["reply"] = true + event := &core.EventDelivery{ + EnrichedEvent: core.EnrichedEvent{ + Event: core.Event{ + ID: fftypes.NewUUID(), + }, + Message: &core.Message{ + Header: core.MessageHeader{ + ID: fftypes.NewUUID(), + Type: core.MessageTypeBroadcast, + CID: fftypes.NewUUID(), + }, + }, + }, + Subscription: core.SubscriptionRef{ + ID: sub.ID, + }, + } + + mcb := wh.callbacks.handlers["ns1"].(*eventsmocks.Callbacks) + mcb.On("DeliveryResponse", mock.Anything, mock.MatchedBy(func(response *core.EventDeliveryResponse) bool { + return !response.Rejected // should be accepted as a no-op so we can move on to other events + })) + + err := wh.BatchDeliveryRequest(wh.ctx, mock.Anything, sub, []*core.CombinedEventDataDelivery{{Event: event, Data: nil}}) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -863,3 +1332,147 @@ func TestNamespaceRestarted(t *testing.T) { wh.NamespaceRestarted("ns1", time.Now()) } + +func TestRequestWithBodyReplyEndToEndWithBatch(t *testing.T) { + wh, cancel := newTestWebHooks(t) + defer cancel() + + r := mux.NewRouter() + r.HandleFunc("/myapi", func(res http.ResponseWriter, req *http.Request) { + assert.Equal(t, "myheaderval", req.Header.Get("My-Header")) + assert.Equal(t, "myqueryval", req.URL.Query().Get("my-query")) + var data []fftypes.JSONObject + err := json.NewDecoder(req.Body).Decode(&data) + assert.NoError(t, err) + assert.Equal(t, len(data), 2) + assert.Equal(t, "inputvalue", data[0].GetObject("in_body").GetString("inputfield")) + res.Header().Set("my-reply-header", "myheaderval2") + res.WriteHeader(200) + res.Write([]byte(`{ + "replyfield": "replyvalue" + }`)) + }).Methods(http.MethodPut) + server := httptest.NewServer(r) + defer server.Close() + + yes := true + dataID := fftypes.NewUUID() + msgID := 
fftypes.NewUUID() + groupHash := fftypes.NewRandB32() + sub := &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Namespace: "ns1", + }, + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + WithData: &yes, + }, + }, + } + to := sub.Options.TransportOptions() + to["reply"] = true + to["json"] = true + to["method"] = "PUT" + to["url"] = fmt.Sprintf("http://%s/myapi", server.Listener.Addr()) + to["headers"] = map[string]interface{}{ + "my-header": "myheaderval", + } + to["query"] = map[string]interface{}{ + "my-query": "myqueryval", + } + event1 := &core.EventDelivery{ + EnrichedEvent: core.EnrichedEvent{ + Event: core.Event{ + ID: fftypes.NewUUID(), + }, + Message: &core.Message{ + Header: core.MessageHeader{ + ID: msgID, + Group: groupHash, + Type: core.MessageTypePrivate, + }, + Data: core.DataRefs{ + {ID: dataID}, + }, + }, + }, + Subscription: core.SubscriptionRef{ + ID: sub.ID, + }, + } + + event2 := &core.EventDelivery{ + EnrichedEvent: core.EnrichedEvent{ + Event: core.Event{ + ID: fftypes.NewUUID(), + }, + Message: &core.Message{ + Header: core.MessageHeader{ + ID: msgID, + Group: groupHash, + Type: core.MessageTypePrivate, + }, + Data: core.DataRefs{ + {ID: dataID}, + }, + }, + }, + Subscription: core.SubscriptionRef{ + ID: sub.ID, + }, + } + + data1 := core.DataArray{&core.Data{ + ID: dataID, + Value: fftypes.JSONAnyPtr(`{ + "in_body": { + "inputfield": "inputvalue" + }, + "in_query": { + "dynamic-query": "dynamicqueryval" + }, + "in_headers": { + "dynamic-header": "dynamicheaderval" + }, + "in_path": "/my/sub/path?escape_query", + "in_replytx": true + }`), + }} + + data2 := core.DataArray{&core.Data{ + ID: dataID, + Value: fftypes.JSONAnyPtr(`{ + "in_body": { + "inputfield": "inputvalue" + }, + "in_query": { + "dynamic-query": "dynamicqueryval" + }, + "in_headers": { + "dynamic-header": "dynamicheaderval" + }, + "in_path": "/my/sub/path?escape_query", + "in_replytx": true + }`), + }} + + mcb := wh.callbacks.handlers["ns1"].(*eventsmocks.Callbacks) + mcb.On("DeliveryResponse", mock.Anything, mock.MatchedBy(func(response *core.EventDeliveryResponse) bool { + assert.Equal(t, *msgID, *response.Reply.Message.Header.CID) + assert.Equal(t, *groupHash, *response.Reply.Message.Header.Group) + assert.Equal(t, core.MessageTypePrivate, response.Reply.Message.Header.Type) + assert.Equal(t, "myheaderval2", response.Reply.InlineData[0].Value.JSONObject().GetObject("headers").GetString("My-Reply-Header")) + assert.Equal(t, "replyvalue", response.Reply.InlineData[0].Value.JSONObject().GetObject("body").GetString("replyfield")) + assert.Equal(t, float64(200), response.Reply.InlineData[0].Value.JSONObject()["status"]) + return true + })).Return(nil) + + err := wh.BatchDeliveryRequest(wh.ctx, mock.Anything, sub, []*core.CombinedEventDataDelivery{{Event: event1, Data: data1}, {Event: event2, Data: data2}}) + assert.NoError(t, err) + + mcb.AssertExpectations(t) +} + +func TestFirstDataNeverNil(t *testing.T) { + assert.NotNil(t, (&whPayload{}).firstData()) +} diff --git a/internal/events/websockets/websocket_connection.go b/internal/events/websockets/websocket_connection.go index 70aa9b50f1..e72d1eff40 100644 --- a/internal/events/websockets/websocket_connection.go +++ b/internal/events/websockets/websocket_connection.go @@ -38,23 +38,25 @@ type websocketStartedSub struct { } type websocketConnection struct { - ctx context.Context - ws *WebSockets - wsConn *websocket.Conn - cancelCtx func() - connID string - sendMessages chan interface{} - 
senderDone chan struct{} - receiverDone chan struct{} - autoAck bool - started []*websocketStartedSub - inflight []*core.EventDeliveryResponse - mux sync.Mutex - closed bool - remoteAddr string - userAgent string - header http.Header - auth core.Authorizer + ctx context.Context + ws *WebSockets + wsConn *websocket.Conn + cancelCtx func() + connID string + sendMessages chan interface{} + senderDone chan struct{} + receiverDone chan struct{} + autoAck bool + started []*websocketStartedSub + inflight []*core.EventDeliveryResponse + mux sync.Mutex + closed bool + remoteAddr string + userAgent string + header http.Header + auth core.Authorizer + namespaceScoped bool // if true then any request to listen is asserted to be in the context of namespace + namespace string } func newConnection(pCtx context.Context, ws *WebSockets, wsConn *websocket.Conn, req *http.Request, auth core.Authorizer) *websocketConnection { @@ -80,6 +82,18 @@ func newConnection(pCtx context.Context, ws *WebSockets, wsConn *websocket.Conn, return wc } +func (wc *websocketConnection) assertNamespace(namespace string) (string, error) { + + if wc.namespaceScoped { + if namespace == "" { + namespace = wc.namespace + } else if namespace != wc.namespace { + return "", i18n.NewError(wc.ctx, coremsgs.MsgWSWrongNamespace) + } + } + return namespace, nil +} + // processAutoStart gives a helper to specify query parameters to auto-start your subscription func (wc *websocketConnection) processAutoStart(req *http.Request) { query := req.URL.Query() @@ -88,12 +102,18 @@ func (wc *websocketConnection) processAutoStart(req *http.Request) { _, hasName := query["name"] autoAck, hasAutoack := req.URL.Query()["autoack"] isAutoack := hasAutoack && (len(autoAck) == 0 || autoAck[0] != "false") + namespace, err := wc.assertNamespace(query.Get("namespace")) + if err != nil { + wc.protocolError(err) + return + } + if hasEphemeral || hasName { filter := core.NewSubscriptionFilterFromQuery(query) err := wc.handleStart(&core.WSStart{ AutoAck: &isAutoack, Ephemeral: isEphemeral, - Namespace: query.Get("namespace"), + Namespace: namespace, Name: query.Get("name"), Filter: filter, }) @@ -157,7 +177,10 @@ func (wc *websocketConnection) receiveLoop() { var msg core.WSStart err = json.Unmarshal(msgData, &msg) if err == nil { - err = wc.authorizeMessage(msg.Namespace) + msg.Namespace, err = wc.assertNamespace(msg.Namespace) + if err == nil { + err = wc.authorizeMessage(msg.Namespace) + } if err == nil { err = wc.handleStart(&msg) } @@ -251,6 +274,14 @@ func (wc *websocketConnection) restartForNamespace(ns string, startTime time.Tim } func (wc *websocketConnection) handleStart(start *core.WSStart) (err error) { + // this will very likely already be checked before we get here but + // it doesn't do any harm to do a final assertion just in case it hasn't been done yet + + start.Namespace, err = wc.assertNamespace(start.Namespace) + if err != nil { + return err + } + wc.mux.Lock() if start.AutoAck != nil { if *start.AutoAck != wc.autoAck && len(wc.started) > 0 { diff --git a/internal/events/websockets/websockets.go b/internal/events/websockets/websockets.go index 984723bb39..a12a325dd0 100644 --- a/internal/events/websockets/websockets.go +++ b/internal/events/websockets/websockets.go @@ -31,6 +31,10 @@ import ( "github.com/hyperledger/firefly/pkg/events" ) +type WebSocketsNamespaced interface { + ServeHTTPNamespaced(namespace string, res http.ResponseWriter, req *http.Request) +} + type WebSockets struct { ctx context.Context capabilities *events.Capabilities @@ 
-75,6 +79,10 @@ func (ws *WebSockets) SetAuthorizer(auth core.Authorizer) { func (ws *WebSockets) SetHandler(namespace string, handler events.Callbacks) error { ws.callbacks.writeLock.Lock() defer ws.callbacks.writeLock.Unlock() + if handler == nil { + delete(ws.callbacks.handlers, namespace) + return nil + } ws.callbacks.handlers[namespace] = handler return nil } @@ -83,22 +91,22 @@ func (ws *WebSockets) Capabilities() *events.Capabilities { return ws.capabilities } -func (ws *WebSockets) ValidateOptions(options *core.SubscriptionOptions) error { +func (ws *WebSockets) ValidateOptions(ctx context.Context, options *core.SubscriptionOptions) error { // We don't support streaming the full data over websockets if options.WithData != nil && *options.WithData { - return i18n.NewError(ws.ctx, coremsgs.MsgWebsocketsNoData) + return i18n.NewError(ctx, coremsgs.MsgWebsocketsNoData) } forceFalse := false options.WithData = &forceFalse return nil } -func (ws *WebSockets) DeliveryRequest(connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error { +func (ws *WebSockets) DeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error { ws.connMux.Lock() conn, ok := ws.connections[connID] ws.connMux.Unlock() if !ok { - return i18n.NewError(ws.ctx, coremsgs.MsgWSConnectionNotActive, connID) + return i18n.NewError(ctx, coremsgs.MsgWSConnectionNotActive, connID) } return conn.dispatch(event) } @@ -118,6 +126,25 @@ func (ws *WebSockets) ServeHTTP(res http.ResponseWriter, req *http.Request) { wc.processAutoStart(req) } +func (ws *WebSockets) ServeHTTPNamespaced(namespace string, res http.ResponseWriter, req *http.Request) { + + wsConn, err := ws.upgrader.Upgrade(res, req, nil) + if err != nil { + log.L(ws.ctx).Errorf("WebSocket upgrade failed: %s", err) + return + } + + ws.connMux.Lock() + wc := newConnection(ws.ctx, ws, wsConn, req, ws.auth) + wc.namespaceScoped = true + wc.namespace = namespace + ws.connections[wc.connID] = wc + ws.connMux.Unlock() + + wc.processAutoStart(req) + +} + func (ws *WebSockets) ack(connID string, inflight *core.EventDeliveryResponse) { if cb, ok := ws.callbacks.handlers[inflight.Subscription.Namespace]; ok { cb.DeliveryResponse(connID, inflight) @@ -213,3 +240,8 @@ func (ws *WebSockets) GetStatus() *core.WebSocketStatus { } return status } + +func (ws *WebSockets) BatchDeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, events []*core.CombinedEventDataDelivery) error { + // We should have rejected creation of the subscription, due to us not supporting this in our capabilities + return i18n.NewError(ctx, coremsgs.MsgBatchDeliveryNotSupported, ws.Name()) +} diff --git a/internal/events/websockets/websockets_test.go b/internal/events/websockets/websockets_test.go index c7e595d958..e2f837988b 100644 --- a/internal/events/websockets/websockets_test.go +++ b/internal/events/websockets/websockets_test.go @@ -51,6 +51,18 @@ func (t *testAuthorizer) Authorize(ctx context.Context, authReq *fftypes.AuthReq } func newTestWebsockets(t *testing.T, cbs *eventsmocks.Callbacks, authorizer core.Authorizer, queryParams ...string) (ws *WebSockets, wsc wsclient.WSClient, cancel func()) { + return newTestWebsocketsCommon(t, cbs, authorizer, "", queryParams...) 
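For illustration, one way an HTTP layer could expose the new namespace-scoped upgrade path through the WebSocketsNamespaced interface added above. The /api/v1/namespaces/{ns}/ws route shape is an assumption of this sketch; the unit tests that follow bind a fixed namespace through a small wrapper handler in the same way.

// Editorial sketch: routing websocket upgrades through ServeHTTPNamespaced so each
// connection is scoped to the namespace taken from the request path.
package example

import (
	"net/http"

	"github.com/gorilla/mux"

	"github.com/hyperledger/firefly/internal/events/websockets"
)

// namespacedWSHandler scopes every websocket connection to the path namespace,
// so "start" requests for any other namespace are rejected by the connection.
func namespacedWSHandler(ws websockets.WebSocketsNamespaced) http.Handler {
	r := mux.NewRouter()
	r.HandleFunc("/api/v1/namespaces/{ns}/ws", func(res http.ResponseWriter, req *http.Request) {
		ns := mux.Vars(req)["ns"] // hypothetical route variable for this sketch
		ws.ServeHTTPNamespaced(ns, res, req)
	})
	return r
}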
+} + +type testNamespacedHandler struct { + ws *WebSockets + namespace string +} + +func (h *testNamespacedHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) { + h.ws.ServeHTTPNamespaced(h.namespace, res, req) +} +func newTestWebsocketsCommon(t *testing.T, cbs *eventsmocks.Callbacks, authorizer core.Authorizer, namespace string, queryParams ...string) (ws *WebSockets, wsc wsclient.WSClient, cancel func()) { coreconfig.Reset() ws = &WebSockets{} @@ -63,8 +75,16 @@ func newTestWebsockets(t *testing.T, cbs *eventsmocks.Callbacks, authorizer core assert.Equal(t, "websockets", ws.Name()) assert.NotNil(t, ws.Capabilities()) cbs.On("ConnectionClosed", mock.Anything).Return(nil).Maybe() - - svr := httptest.NewServer(ws) + var svr *httptest.Server + if namespace == "" { + svr = httptest.NewServer(ws) + } else { + namespacedHandler := &testNamespacedHandler{ + ws: ws, + namespace: namespace, + } + svr = httptest.NewServer(namespacedHandler) + } clientConfig := config.RootSection("ut.wsclient") wsclient.InitConfig(clientConfig) @@ -94,7 +114,7 @@ func TestValidateOptionsFail(t *testing.T) { defer cancel() yes := true - err := ws.ValidateOptions(&core.SubscriptionOptions{ + err := ws.ValidateOptions(ws.ctx, &core.SubscriptionOptions{ SubscriptionCoreOptions: core.SubscriptionCoreOptions{ WithData: &yes, }, @@ -108,9 +128,13 @@ func TestValidateOptionsOk(t *testing.T) { defer cancel() opts := &core.SubscriptionOptions{} - err := ws.ValidateOptions(opts) + err := ws.ValidateOptions(ws.ctx, opts) assert.NoError(t, err) assert.False(t, *opts.WithData) + + ws.SetHandler("ns1", nil) + assert.Empty(t, ws.callbacks.handlers) + } func TestSendBadData(t *testing.T) { @@ -190,7 +214,7 @@ func TestStartReceiveAckEphemeral(t *testing.T) { assert.NoError(t, err) <-waitSubscribed - ws.DeliveryRequest(connID, nil, &core.EventDelivery{ + ws.DeliveryRequest(ws.ctx, connID, nil, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ID: fftypes.NewUUID()}, }, @@ -243,7 +267,7 @@ func TestStartReceiveDurable(t *testing.T) { assert.NoError(t, err) <-waitSubscribed - ws.DeliveryRequest(connID, nil, &core.EventDelivery{ + ws.DeliveryRequest(ws.ctx, connID, nil, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ID: fftypes.NewUUID()}, }, @@ -254,7 +278,7 @@ func TestStartReceiveDurable(t *testing.T) { }, }, nil) // Put a second in flight - ws.DeliveryRequest(connID, nil, &core.EventDelivery{ + ws.DeliveryRequest(ws.ctx, connID, nil, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ID: fftypes.NewUUID()}, }, @@ -323,7 +347,7 @@ func TestStartReceiveDurableWithAuth(t *testing.T) { assert.NoError(t, err) <-waitSubscribed - ws.DeliveryRequest(connID, nil, &core.EventDelivery{ + ws.DeliveryRequest(ws.ctx, connID, nil, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ID: fftypes.NewUUID()}, }, @@ -334,7 +358,7 @@ func TestStartReceiveDurableWithAuth(t *testing.T) { }, }, nil) // Put a second in flight - ws.DeliveryRequest(connID, nil, &core.EventDelivery{ + ws.DeliveryRequest(ws.ctx, connID, nil, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ID: fftypes.NewUUID()}, }, @@ -432,7 +456,7 @@ func TestAutoStartReceiveAckEphemeral(t *testing.T) { defer cancel() <-waitSubscribed - ws.DeliveryRequest(connID, nil, &core.EventDelivery{ + ws.DeliveryRequest(ws.ctx, connID, nil, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ID: fftypes.NewUUID()}, }, @@ -646,7 +670,7 @@ func 
TestWebsocketDispatchAfterClose(t *testing.T) { ctx: context.Background(), connections: make(map[string]*websocketConnection), } - err := ws.DeliveryRequest("gone", nil, &core.EventDelivery{}, nil) + err := ws.DeliveryRequest(ws.ctx, "gone", nil, &core.EventDelivery{}, nil) assert.Regexp(t, "FF10173", err) } @@ -670,7 +694,7 @@ func TestDispatchAutoAck(t *testing.T) { autoAck: true, } wsc.ws.connections[wsc.connID] = wsc - err := wsc.ws.DeliveryRequest(wsc.connID, nil, &core.EventDelivery{ + err := wsc.ws.DeliveryRequest(wsc.ctx, wsc.connID, nil, &core.EventDelivery{ EnrichedEvent: core.EnrichedEvent{ Event: core.Event{ID: fftypes.NewUUID()}, }, @@ -801,3 +825,159 @@ func TestNamespaceRestartedFailClose(t *testing.T) { mcb.AssertExpectations(t) } + +func TestEventDeliveryBatchReturnsUnsupported(t *testing.T) { + cbs := &eventsmocks.Callbacks{} + ws, _, cancel := newTestWebsockets(t, cbs, nil) + defer cancel() + + sub := &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Namespace: "ns1", + }, + } + + err := ws.BatchDeliveryRequest(ws.ctx, "id", sub, []*core.CombinedEventDataDelivery{}) + assert.Regexp(t, "FF10461", err) +} + +func TestNamespaceScopedSendWrongNamespaceStartAction(t *testing.T) { + cbs := &eventsmocks.Callbacks{} + _, wsc, cancel := newTestWebsocketsCommon(t, cbs, nil, "ns1") + defer cancel() + cbs.On("ConnectionClosed", mock.Anything).Return(nil) + + err := wsc.Send(context.Background(), []byte(`{"type":"start","namespace":"ns2"}`)) + assert.NoError(t, err) + b := <-wsc.Receive() + var res core.WSError + err = json.Unmarshal(b, &res) + assert.NoError(t, err) + assert.Equal(t, core.WSProtocolErrorEventType, res.Type) + assert.Regexp(t, "FF10462", res.Error) +} + +func TestNamespaceScopedSendWrongNamespaceQueryParameter(t *testing.T) { + cbs := &eventsmocks.Callbacks{} + _, wsc, cancel := newTestWebsocketsCommon(t, cbs, nil, "ns1", "namespace=ns2") + defer cancel() + cbs.On("ConnectionClosed", mock.Anything).Return(nil) + + b := <-wsc.Receive() + var res core.WSError + err := json.Unmarshal(b, &res) + assert.NoError(t, err) + assert.Equal(t, core.WSProtocolErrorEventType, res.Type) + assert.Regexp(t, "FF10462", res.Error) +} + +func TestNamespaceScopedUpgradeFail(t *testing.T) { + cbs := &eventsmocks.Callbacks{} + _, wsc, cancel := newTestWebsocketsCommon(t, cbs, nil, "ns1") + defer cancel() + + u, _ := url.Parse(wsc.URL()) + u.Scheme = "http" + res, err := http.Get(u.String()) + assert.NoError(t, err) + assert.Equal(t, 400, res.StatusCode) + +} + +func TestNamespaceScopedSuccess(t *testing.T) { + cbs := &eventsmocks.Callbacks{} + ws, wsc, cancel := newTestWebsocketsCommon(t, cbs, nil, "ns1") + defer cancel() + var connID string + sub := cbs.On("RegisterConnection", + mock.MatchedBy(func(s string) bool { connID = s; return true }), + mock.MatchedBy(func(subMatch events.SubscriptionMatcher) bool { + return subMatch(core.SubscriptionRef{Namespace: "ns1", Name: "sub1"}) && + !subMatch(core.SubscriptionRef{Namespace: "ns2", Name: "sub1"}) && + !subMatch(core.SubscriptionRef{Namespace: "ns1", Name: "sub2"}) + }), + ).Return(nil) + ack := cbs.On("DeliveryResponse", + mock.MatchedBy(func(s string) bool { return s == connID }), + mock.Anything).Return(nil) + + waitSubscribed := make(chan struct{}) + sub.RunFn = func(a mock.Arguments) { + close(waitSubscribed) + } + + waitAcked := make(chan struct{}) + ack.RunFn = func(a mock.Arguments) { + close(waitAcked) + } + + err := wsc.Send(context.Background(), []byte(`{"type":"start","name":"sub1"}`)) + assert.NoError(t, err) + + 
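The namespace-scoped tests in this hunk exercise the new ServeHTTPNamespaced path, where a connection is pinned to a single namespace and any start request for a different namespace is rejected (the FF10462 protocol error asserted above). A standalone sketch of the same guard pattern using only the standard library; assertNamespace and namespaceScopedHandler are illustrative names for this sketch, not the FireFly API:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// assertNamespace mirrors the guard a namespace-scoped connection applies:
// an empty request defaults to the bound namespace, a mismatch is an error.
func assertNamespace(bound, requested string) (string, error) {
	if requested == "" {
		return bound, nil
	}
	if requested != bound {
		return "", errors.New("wrong namespace for this connection")
	}
	return requested, nil
}

// namespaceScopedHandler binds a namespace at mount time, so the URL the
// handler is mounted on decides the scope of every connection it serves.
func namespaceScopedHandler(namespace string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ns, err := assertNamespace(namespace, r.URL.Query().Get("namespace"))
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		fmt.Fprintf(w, "subscribed in namespace %s\n", ns)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.Handle("/api/v1/namespaces/ns1/ws", namespaceScopedHandler("ns1"))
	_ = http.ListenAndServe(":8080", mux) // illustrative only
}

Binding the namespace at routing time means the connection never has to trust a client-supplied namespace, which is the design choice the tests above are verifying.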
<-waitSubscribed + ws.DeliveryRequest(ws.ctx, connID, nil, &core.EventDelivery{ + EnrichedEvent: core.EnrichedEvent{ + Event: core.Event{ID: fftypes.NewUUID()}, + }, + Subscription: core.SubscriptionRef{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "sub1", + }, + }, nil) + // Put a second in flight + ws.DeliveryRequest(ws.ctx, connID, nil, &core.EventDelivery{ + EnrichedEvent: core.EnrichedEvent{ + Event: core.Event{ID: fftypes.NewUUID()}, + }, + Subscription: core.SubscriptionRef{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Name: "sub2", + }, + }, nil) + + b := <-wsc.Receive() + var res core.EventDelivery + err = json.Unmarshal(b, &res) + assert.NoError(t, err) + + assert.Equal(t, "ns1", res.Subscription.Namespace) + assert.Equal(t, "sub1", res.Subscription.Name) + err = wsc.Send(context.Background(), []byte(fmt.Sprintf(`{ + "type":"ack", + "id": "%s", + "subscription": { + "namespace": "ns1", + "name": "sub1" + } + }`, res.ID))) + assert.NoError(t, err) + + <-waitAcked + + // Check we left the right one behind + conn := ws.connections[connID] + assert.Equal(t, 1, len(conn.inflight)) + assert.Equal(t, "sub2", conn.inflight[0].Subscription.Name) + + cbs.AssertExpectations(t) +} + +func TestHandleStartWrongNamespace(t *testing.T) { + + // it is not currently possible through exported functions to get to handleStart with the wrong namespace + // but we'd like to have a final assertion in there as a safety net against accidental data leakage across namespaces + // so to prove that safety net, we need to drive the private function handleStart directly. + wc := &websocketConnection{ + ctx: context.Background(), + namespaceScoped: true, + namespace: "ns1", + } + startMessage := &core.WSStart{ + Namespace: "ns2", + } + err := wc.handleStart(startMessage) + assert.Error(t, err) + assert.Regexp(t, "FF10462", err) +} diff --git a/internal/identity/identitymanager.go b/internal/identity/identitymanager.go index bdc9d5dfff..c20b19e2b4 100644 --- a/internal/identity/identitymanager.go +++ b/internal/identity/identitymanager.go @@ -44,14 +44,15 @@ type Manager interface { ResolveInputSigningKey(ctx context.Context, inputKey string, keyNormalizationMode int) (signingKey string, err error) ResolveQuerySigningKey(ctx context.Context, inputKey string, keyNormalizationMode int) (signingKey string, err error) ResolveIdentitySigner(ctx context.Context, identity *core.Identity) (parentSigner *core.SignerRef, err error) + ResolveMultipartyRootVerifier(ctx context.Context) (*core.VerifierRef, error) FindIdentityForVerifier(ctx context.Context, iTypes []core.IdentityType, verifier *core.VerifierRef) (identity *core.Identity, err error) CachedIdentityLookupByID(ctx context.Context, id *fftypes.UUID) (identity *core.Identity, err error) CachedIdentityLookupMustExist(ctx context.Context, did string) (identity *core.Identity, retryable bool, err error) CachedIdentityLookupNilOK(ctx context.Context, did string) (identity *core.Identity, retryable bool, err error) - GetMultipartyRootVerifier(ctx context.Context) (*core.VerifierRef, error) - GetMultipartyRootOrg(ctx context.Context) (*core.Identity, error) GetLocalNode(ctx context.Context) (node *core.Identity, err error) + GetRootOrgDID(ctx context.Context) (string, error) + GetRootOrg(ctx context.Context) (org *core.Identity, err error) VerifyIdentityChain(ctx context.Context, identity *core.Identity) (immediateParent *core.Identity, retryable bool, err error) ValidateNodeOwner(ctx context.Context, node *core.Identity, identity *core.Identity) (valid bool, err
error) } @@ -104,11 +105,33 @@ func ParseKeyNormalizationConfig(strConfigVal string) int { func (im *identityManager) GetLocalNode(ctx context.Context) (node *core.Identity, err error) { nodeName := im.multiparty.LocalNode().Name - nodeDID := fmt.Sprintf("%s%s", core.FireFlyNodeDIDPrefix, nodeName) - node, _, err = im.CachedIdentityLookupNilOK(ctx, nodeDID) + if nodeName != "" { + nodeDID := fmt.Sprintf("%s%s", core.FireFlyNodeDIDPrefix, nodeName) + node, _, err = im.CachedIdentityLookupNilOK(ctx, nodeDID) + } + if err == nil && node == nil { + return nil, i18n.NewError(ctx, coremsgs.MsgLocalNodeNotSet) + } return node, err } +func (im *identityManager) GetRootOrgDID(ctx context.Context) (string, error) { + orgName := im.multiparty.RootOrg().Name + if orgName != "" { + orgDID := fmt.Sprintf("%s%s", core.FireFlyOrgDIDPrefix, orgName) + return orgDID, nil + } + return "", i18n.NewError(ctx, coremsgs.MsgLocalOrgNotSet) +} + +func (im *identityManager) GetRootOrg(ctx context.Context) (org *core.Identity, err error) { + orgDID, err := im.GetRootOrgDID(ctx) + if err == nil { + org, _, err = im.CachedIdentityLookupMustExist(ctx, orgDID) + } + return org, err +} + // ResolveInputSigningKey takes in only a "key" (which may be empty to use the default) to be resolved and returned. // This is for cases where keys are used directly without an "author" field alongside them (custom contracts, tokens), // or when the author is known by the caller and should not / cannot be confirmed prior to sending (identity claims) @@ -273,7 +296,7 @@ func (im *identityManager) resolveDefaultSigningIdentity(ctx context.Context, si if err != nil { return err } - identity, err := im.GetMultipartyRootOrg(ctx) + identity, err := im.GetRootOrg(ctx) if err != nil { return err } @@ -297,9 +320,9 @@ func (im *identityManager) getDefaultVerifier(ctx context.Context, intent blockc return nil, i18n.NewError(ctx, coremsgs.MsgNodeMissingBlockchainKey) } -// GetMultipartyRootVerifier gets the blockchain verifier of the root org via the configuration, +// ResolveMultipartyRootVerifier gets the blockchain verifier of the root org via the configuration, // resolving it for use as a signing key for the purpose of signing a child identity -func (im *identityManager) GetMultipartyRootVerifier(ctx context.Context) (*core.VerifierRef, error) { +func (im *identityManager) ResolveMultipartyRootVerifier(ctx context.Context) (*core.VerifierRef, error) { orgKey := im.multiparty.RootOrg().Key if orgKey == "" { return nil, i18n.NewError(ctx, coremsgs.MsgNodeMissingBlockchainKey) @@ -339,25 +362,6 @@ func (im *identityManager) FindIdentityForVerifier(ctx context.Context, iTypes [ return nil, nil } -// GetMultipartyRootOrg returns the identity of the organization that owns the node, if fully registered within the given namespace -func (im *identityManager) GetMultipartyRootOrg(ctx context.Context) (*core.Identity, error) { - verifierRef, err := im.GetMultipartyRootVerifier(ctx) - if err != nil { - return nil, err - } - - orgName := im.multiparty.RootOrg().Name - identity, err := im.cachedIdentityLookupByVerifierRef(ctx, im.namespace, verifierRef) - if err != nil || identity == nil { - return nil, i18n.WrapError(ctx, err, coremsgs.MsgLocalOrgLookupFailed, orgName, verifierRef.Value) - } - // Confirm that the specified blockchain key is associated with the correct org - if identity.Type != core.IdentityTypeOrg || identity.Name != orgName { - return nil, i18n.NewError(ctx, coremsgs.MsgLocalOrgLookupFailed, orgName, verifierRef.Value) - } - return 
identity, nil -} - func (im *identityManager) VerifyIdentityChain(ctx context.Context, checkIdentity *core.Identity) (immediateParent *core.Identity, retryable bool, err error) { err = checkIdentity.Validate(ctx) diff --git a/internal/identity/identitymanager_test.go b/internal/identity/identitymanager_test.go index 9db0844f8a..a9c111a11e 100644 --- a/internal/identity/identitymanager_test.go +++ b/internal/identity/identitymanager_test.go @@ -211,6 +211,47 @@ func TestResolveInputSigningIdentityNoKey(t *testing.T) { } +func TestResolveInputSigningIdentityNoRootOrg(t *testing.T) { + + ctx, im := newTestIdentityManager(t) + + mmp := im.multiparty.(*multipartymocks.Manager) + mmp.On("RootOrg").Return(multiparty.RootOrg{Key: "key123"}) + + mbi := im.blockchain.(*blockchainmocks.Plugin) + mbi.On("ResolveSigningKey", ctx, "key123", blockchain.ResolveKeyIntentSign).Return("fullkey123", nil) + + msgIdentity := &core.SignerRef{} + err := im.ResolveInputSigningIdentity(ctx, msgIdentity) + assert.Regexp(t, "FF10281", err) + + mbi.AssertExpectations(t) + mmp.AssertExpectations(t) + +} + +func TestResolveInputSigningIdentityRootOrgError(t *testing.T) { + + ctx, im := newTestIdentityManager(t) + + mmp := im.multiparty.(*multipartymocks.Manager) + mmp.On("RootOrg").Return(multiparty.RootOrg{Name: "org1", Key: "key123"}) + + mbi := im.blockchain.(*blockchainmocks.Plugin) + mbi.On("ResolveSigningKey", ctx, "key123", blockchain.ResolveKeyIntentSign).Return("fullkey123", nil) + + mdi := im.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByDID", ctx, "ns1", "did:firefly:org/org1").Return(nil, fmt.Errorf("pop")) + + msgIdentity := &core.SignerRef{} + err := im.ResolveInputSigningIdentity(ctx, msgIdentity) + assert.Regexp(t, "pop", err) + + mbi.AssertExpectations(t) + mmp.AssertExpectations(t) + +} + func TestResolveInputSigningIdentityOrgFallbackOk(t *testing.T) { ctx, im := newTestIdentityManager(t) @@ -224,16 +265,7 @@ func TestResolveInputSigningIdentityOrgFallbackOk(t *testing.T) { orgID := fftypes.NewUUID() mdi := im.database.(*databasemocks.Plugin) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "fullkey123"). - Return((&core.Verifier{ - Identity: orgID, - Namespace: "ns1", - VerifierRef: core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "fullkey123", - }, - }).Seal(), nil) - mdi.On("GetIdentityByID", ctx, "ns1", orgID). + mdi.On("GetIdentityByDID", ctx, "ns1", "did:firefly:org/org1"). 
Return(&core.Identity{ IdentityBase: core.IdentityBase{ ID: orgID, @@ -774,75 +806,21 @@ func TestFirstVerifierForIdentityNotFound(t *testing.T) { } -func TestResolveDefaultSigningIdentityNotFound(t *testing.T) { - - ctx, im := newTestIdentityManager(t) - - mbi := im.blockchain.(*blockchainmocks.Plugin) - mmp := im.multiparty.(*multipartymocks.Manager) - mmp.On("GetNetworkVersion").Return(1) - mmp.On("RootOrg").Return(multiparty.RootOrg{ - Key: "key12345", - }) - mbi.On("ResolveSigningKey", ctx, "key12345", blockchain.ResolveKeyIntentSign).Return("key12345", nil) - - mdi := im.database.(*databasemocks.Plugin) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "key12345").Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, core.LegacySystemNamespace, "key12345").Return(nil, nil) - - err := im.resolveDefaultSigningIdentity(ctx, &core.SignerRef{}) - assert.Regexp(t, "FF10281", err) - - mbi.AssertExpectations(t) - mdi.AssertExpectations(t) - mmp.AssertExpectations(t) - -} - -func TestResolveDefaultSigningIdentitySystemFallback(t *testing.T) { +func TestResolveMultipartyRootVerifierKeyUnset(t *testing.T) { ctx, im := newTestIdentityManager(t) - id := &core.Identity{ - IdentityBase: core.IdentityBase{ - ID: fftypes.NewUUID(), - DID: "did:firefly:org/org1", - Namespace: "ns1", - Name: "org1", - Type: core.IdentityTypeOrg, - }, - } - verifier := &core.Verifier{ - Identity: id.ID, - VerifierRef: core.VerifierRef{ - Value: "key12345", - }, - } - - mbi := im.blockchain.(*blockchainmocks.Plugin) mmp := im.multiparty.(*multipartymocks.Manager) - mmp.On("GetNetworkVersion").Return(1) - mmp.On("RootOrg").Return(multiparty.RootOrg{Name: "org1", Key: "key12345"}) - mbi.On("ResolveSigningKey", ctx, "key12345", blockchain.ResolveKeyIntentSign).Return("key12345", nil) - - mdi := im.database.(*databasemocks.Plugin) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "key12345").Return(nil, nil) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, core.LegacySystemNamespace, "key12345").Return(verifier, nil) - mdi.On("GetIdentityByID", ctx, core.LegacySystemNamespace, id.ID).Return(id, nil) + mmp.On("RootOrg").Return(multiparty.RootOrg{}) - ref := &core.SignerRef{} - err := im.resolveDefaultSigningIdentity(ctx, ref) - assert.NoError(t, err) - assert.Equal(t, "did:firefly:org/org1", ref.Author) - assert.Equal(t, "key12345", ref.Key) + _, err := im.ResolveMultipartyRootVerifier(ctx) + assert.Regexp(t, "FF10354", err) - mbi.AssertExpectations(t) - mdi.AssertExpectations(t) mmp.AssertExpectations(t) } -func TestGetMultipartyRootVerifierResolveFailed(t *testing.T) { +func TestResolveMultipartyRootVerifierResolveFailed(t *testing.T) { ctx, im := newTestIdentityManager(t) @@ -852,7 +830,7 @@ func TestGetMultipartyRootVerifierResolveFailed(t *testing.T) { mbi := im.blockchain.(*blockchainmocks.Plugin) mbi.On("ResolveSigningKey", ctx, "0x12345", blockchain.ResolveKeyIntentSign).Return("", fmt.Errorf("pop")) - _, err := im.GetMultipartyRootVerifier(ctx) + _, err := im.ResolveMultipartyRootVerifier(ctx) assert.Regexp(t, "pop", err) mbi.AssertExpectations(t) @@ -860,60 +838,6 @@ func TestGetMultipartyRootVerifierResolveFailed(t *testing.T) { } -func TestGetMultipartyRootVerifierNotSet(t *testing.T) { - - ctx, im := newTestIdentityManager(t) - - mmp := im.multiparty.(*multipartymocks.Manager) - mmp.On("RootOrg").Return(multiparty.RootOrg{}) - - _, err := im.GetMultipartyRootOrg(ctx) - assert.Regexp(t, "FF10354", err) - - 
mmp.AssertExpectations(t) - -} - -func TestGetMultipartyRootOrgMismatch(t *testing.T) { - - ctx, im := newTestIdentityManager(t) - - mmp := im.multiparty.(*multipartymocks.Manager) - mmp.On("RootOrg").Return(multiparty.RootOrg{ - Key: "key12345", - }) - mbi := im.blockchain.(*blockchainmocks.Plugin) - mbi.On("ResolveSigningKey", ctx, "key12345", blockchain.ResolveKeyIntentSign).Return("fullkey123", nil) - - orgID := fftypes.NewUUID() - mdi := im.database.(*databasemocks.Plugin) - mdi.On("GetVerifierByValue", ctx, core.VerifierTypeEthAddress, "ns1", "fullkey123"). - Return((&core.Verifier{ - Identity: orgID, - Namespace: "ns1", - VerifierRef: core.VerifierRef{ - Type: core.VerifierTypeEthAddress, - Value: "fullkey123", - }, - }).Seal(), nil) - mdi.On("GetIdentityByID", ctx, "ns1", orgID). - Return(&core.Identity{ - IdentityBase: core.IdentityBase{ - ID: orgID, - DID: "did:firefly:org/org2", - Namespace: "ns1", - Name: "org2", - Type: core.IdentityTypeOrg, - }, - }, nil) - - _, err := im.GetMultipartyRootOrg(ctx) - assert.Regexp(t, "FF10281", err) - - mmp.AssertExpectations(t) - -} - func TestCachedIdentityLookupByVerifierRefCaching(t *testing.T) { ctx, im := newTestIdentityManager(t) @@ -1509,6 +1433,61 @@ func TestGetLocalNode(t *testing.T) { mdi.AssertExpectations(t) } +func TestGetLocalNodeNotSet(t *testing.T) { + ctx, im := newTestIdentityManager(t) + mmp := im.multiparty.(*multipartymocks.Manager) + + mmp.On("LocalNode").Return(multiparty.LocalNode{}) + + _, err := im.GetLocalNode(ctx) + assert.Regexp(t, "FF10225", err) + + mmp.AssertExpectations(t) +} + +func TestGetRootOrg(t *testing.T) { + ctx, im := newTestIdentityManager(t) + mmp := im.multiparty.(*multipartymocks.Manager) + mdi := im.database.(*databasemocks.Plugin) + + org := &core.Identity{ + IdentityBase: core.IdentityBase{ + ID: fftypes.NewUUID(), + }, + } + + mmp.On("RootOrg").Return(multiparty.RootOrg{Name: "org1"}).Twice() + mdi.On("GetIdentityByDID", ctx, "ns1", "did:firefly:org/org1").Return(org, nil).Once() + + result, err := im.GetRootOrg(ctx) + assert.NoError(t, err) + assert.Equal(t, org, result) + + // second call is cached + result, err = im.GetRootOrg(ctx) + assert.NoError(t, err) + assert.Equal(t, org, result) + + mmp.AssertExpectations(t) + mdi.AssertExpectations(t) +} + +func TestGetRootOrgUnregistered(t *testing.T) { + ctx, im := newTestIdentityManager(t) + mmp := im.multiparty.(*multipartymocks.Manager) + mdi := im.database.(*databasemocks.Plugin) + + mmp.On("RootOrg").Return(multiparty.RootOrg{Name: "org1"}) + mdi.On("GetIdentityByDID", ctx, "ns1", "did:firefly:org/org1").Return(nil, nil) + mmp.On("GetNetworkVersion").Return(2) + + _, err := im.GetRootOrg(ctx) + assert.Regexp(t, "FF10277", err) + + mmp.AssertExpectations(t) + mdi.AssertExpectations(t) +} + func TestParseKeyNormalizationConfig(t *testing.T) { assert.Equal(t, KeyNormalizationBlockchainPlugin, ParseKeyNormalizationConfig("blockchain_Plugin")) assert.Equal(t, KeyNormalizationNone, ParseKeyNormalizationConfig("none")) diff --git a/internal/metrics/private_msg.go b/internal/metrics/private_msg.go index 1f3c7cea96..de654e3a88 100644 --- a/internal/metrics/private_msg.go +++ b/internal/metrics/private_msg.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
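The identity manager changes earlier in this diff replace the verifier-based GetMultipartyRootOrg lookup with GetRootOrgDID and GetRootOrg, which resolve the root organization by its deterministic DID rather than by blockchain key. A minimal sketch of that DID construction and its error path when no root org name is configured; the did:firefly:org/ prefix matches the lookups in the tests above, while the helper name here is illustrative rather than the exported FireFly API:

package main

import (
	"errors"
	"fmt"
)

const fireflyOrgDIDPrefix = "did:firefly:org/"

// rootOrgDID builds the deterministic DID for a configured root org name,
// mirroring the prefix+name construction the new GetRootOrgDID relies on.
func rootOrgDID(orgName string) (string, error) {
	if orgName == "" {
		return "", errors.New("local org is not set in the multiparty config")
	}
	return fireflyOrgDIDPrefix + orgName, nil
}

func main() {
	did, err := rootOrgDID("org1")
	if err != nil {
		panic(err)
	}
	fmt.Println(did) // did:firefly:org/org1
}

Looking the org up by DID keeps the lookup valid even before the registration claim has confirmed a verifier, which is why the verifier-mismatch test cases could be removed above.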
// // SPDX-License-Identifier: Apache-2.0 // @@ -35,6 +35,8 @@ var PrivateMsgConfirmedCounterName = "ff_private_msg_confirmed_total" var PrivateMsgRejectedCounterName = "ff_private_msg_rejected_total" // PrivateMsgHistogramName is the prometheus metric for tracking the total number of private messages - histogram +// +//nolint:gosec var PrivateMsgHistogramName = "ff_private_msg_histogram" func InitPrivateMsgMetrics() { diff --git a/internal/multiparty/manager.go b/internal/multiparty/manager.go index 3fc3277f41..b0c7e1a615 100644 --- a/internal/multiparty/manager.go +++ b/internal/multiparty/manager.go @@ -56,14 +56,14 @@ type Manager interface { GetNetworkVersion() int // SubmitBatchPin sequences a batch of message globally to all viewers of a given ledger - SubmitBatchPin(ctx context.Context, batch *core.BatchPersisted, contexts []*fftypes.Bytes32, payloadRef string) error + SubmitBatchPin(ctx context.Context, batch *core.BatchPersisted, contexts []*fftypes.Bytes32, payloadRef string, idempotentSubmit bool) error // SubmitNetworkAction writes a special "BatchPin" event which signals the plugin to take an action - SubmitNetworkAction(ctx context.Context, signingKey string, action *core.NetworkAction) error + SubmitNetworkAction(ctx context.Context, signingKey string, action *core.NetworkAction, idempotentSubmit bool) error // From operations.OperationHandler PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) - RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) + RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) } type Config struct { @@ -204,7 +204,7 @@ func (mm *multipartyManager) GetNetworkVersion() int { return mm.namespace.Contracts.Active.Info.Version } -func (mm *multipartyManager) SubmitNetworkAction(ctx context.Context, signingKey string, action *core.NetworkAction) error { +func (mm *multipartyManager) SubmitNetworkAction(ctx context.Context, signingKey string, action *core.NetworkAction, idempotentSubmit bool) error { if action.Type != core.NetworkActionTerminate { return i18n.NewError(ctx, coremsgs.MsgUnrecognizedNetworkAction, action.Type) } @@ -224,7 +224,7 @@ func (mm *multipartyManager) SubmitNetworkAction(ctx context.Context, signingKey return err } - _, err = mm.operations.RunOperation(ctx, opNetworkAction(op, action.Type, signingKey)) + _, err = mm.operations.RunOperation(ctx, opNetworkAction(op, action.Type, signingKey), idempotentSubmit) return err } @@ -244,13 +244,13 @@ func (mm *multipartyManager) prepareInvokeOperation(ctx context.Context, batch * }), nil } -func (mm *multipartyManager) SubmitBatchPin(ctx context.Context, batch *core.BatchPersisted, contexts []*fftypes.Bytes32, payloadRef string) error { +func (mm *multipartyManager) SubmitBatchPin(ctx context.Context, batch *core.BatchPersisted, contexts []*fftypes.Bytes32, payloadRef string, idempotentSubmit bool) error { if batch.TX.Type == core.TransactionTypeContractInvokePin { preparedOp, err := mm.prepareInvokeOperation(ctx, batch, contexts, payloadRef) if err != nil { return err } else if preparedOp != nil { - _, err = mm.operations.RunOperation(ctx, preparedOp) + _, err = mm.operations.RunOperation(ctx, preparedOp, idempotentSubmit) return err } log.L(ctx).Warnf("No invoke operation found on transaction %s", batch.TX.ID) @@ -269,6 +269,6 @@ func (mm *multipartyManager) SubmitBatchPin(ctx context.Context, batch *core.Bat if 
mm.metrics.IsMetricsEnabled() { mm.metrics.CountBatchPin() } - _, err := mm.operations.RunOperation(ctx, opBatchPin(op, batch, contexts, payloadRef)) + _, err := mm.operations.RunOperation(ctx, opBatchPin(op, batch, contexts, payloadRef), idempotentSubmit) return err } diff --git a/internal/multiparty/manager_test.go b/internal/multiparty/manager_test.go index f9dc8e885f..8dfc1218ce 100644 --- a/internal/multiparty/manager_test.go +++ b/internal/multiparty/manager_test.go @@ -275,11 +275,11 @@ func TestSubmitNetworkAction(t *testing.T) { mp.mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(networkActionData) return op.Type == core.OpTypeBlockchainNetworkAction && data.Type == core.NetworkActionTerminate - })).Return(nil, nil) + }), false).Return(nil, nil) err := mp.ConfigureContract(context.Background()) assert.NoError(t, err) - err = mp.SubmitNetworkAction(context.Background(), "0x123", &core.NetworkAction{Type: core.NetworkActionTerminate}) + err = mp.SubmitNetworkAction(context.Background(), "0x123", &core.NetworkAction{Type: core.NetworkActionTerminate}, false) assert.Nil(t, err) mp.mbi.AssertExpectations(t) @@ -310,7 +310,7 @@ func TestSubmitNetworkActionTXFail(t *testing.T) { err := mp.ConfigureContract(context.Background()) assert.NoError(t, err) - err = mp.SubmitNetworkAction(context.Background(), "0x123", &core.NetworkAction{Type: core.NetworkActionTerminate}) + err = mp.SubmitNetworkAction(context.Background(), "0x123", &core.NetworkAction{Type: core.NetworkActionTerminate}, false) assert.EqualError(t, err, "pop") mp.mbi.AssertExpectations(t) @@ -343,7 +343,7 @@ func TestSubmitNetworkActionOpFail(t *testing.T) { err := mp.ConfigureContract(context.Background()) assert.NoError(t, err) - err = mp.SubmitNetworkAction(context.Background(), "0x123", &core.NetworkAction{Type: core.NetworkActionTerminate}) + err = mp.SubmitNetworkAction(context.Background(), "0x123", &core.NetworkAction{Type: core.NetworkActionTerminate}, false) assert.EqualError(t, err, "pop") mp.mbi.AssertExpectations(t) @@ -373,7 +373,7 @@ func TestSubmitNetworkActionBadType(t *testing.T) { err := mp.ConfigureContract(context.Background()) assert.NoError(t, err) - err = mp.SubmitNetworkAction(context.Background(), "0x123", &core.NetworkAction{Type: "BAD"}) + err = mp.SubmitNetworkAction(context.Background(), "0x123", &core.NetworkAction{Type: "BAD"}, false) assert.Regexp(t, "FF10397", err) mp.mbi.AssertExpectations(t) @@ -410,9 +410,9 @@ func TestSubmitBatchPinOk(t *testing.T) { mp.mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(txcommon.BatchPinData) return op.Type == core.OpTypeBlockchainPinBatch && data.Batch == batch - })).Return(nil, nil) + }), false).Return(nil, nil) - err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1") + err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1", false) assert.NoError(t, err) } @@ -448,9 +448,9 @@ func TestSubmitPinnedBatchWithMetricsOk(t *testing.T) { mp.mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(txcommon.BatchPinData) return op.Type == core.OpTypeBlockchainPinBatch && data.Batch == batch - })).Return(nil, nil) + }), false).Return(nil, nil) - err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1") + err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1", false) assert.NoError(t, err) } @@ -483,9 +483,9 @@ func TestSubmitBatchPinWithBatchOk(t *testing.T) { assert.Equal(t, 
contexts, data.BatchPin.Contexts) assert.Equal(t, "payload1", data.BatchPin.PayloadRef) return op.Type == core.OpTypeBlockchainInvoke && data.BatchPin.Batch == batch - })).Return(nil, nil) + }), false).Return(nil, nil) - err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1") + err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1", false) assert.NoError(t, err) } @@ -511,7 +511,7 @@ func TestSubmitBatchPinWithBatchOpFailure(t *testing.T) { mp.mth.On("FindOperationInTransaction", ctx, batch.TX.ID, core.OpTypeBlockchainInvoke).Return(nil, fmt.Errorf("pop")) - err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1") + err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1", false) assert.EqualError(t, err, "pop") } @@ -543,7 +543,7 @@ func TestSubmitBatchPinWithBatchOpMalformed(t *testing.T) { mp.mth.On("FindOperationInTransaction", ctx, batch.TX.ID, core.OpTypeBlockchainInvoke).Return(invokeOp, nil) - err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1") + err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1", false) assert.Regexp(t, "FF00127", err) } @@ -581,9 +581,9 @@ func TestSubmitBatchPinWithBatchOpNotFound(t *testing.T) { mp.mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(txcommon.BatchPinData) return op.Type == core.OpTypeBlockchainPinBatch && data.Batch == batch - })).Return(nil, nil) + }), false).Return(nil, nil) - err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1") + err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1", false) assert.NoError(t, err) } @@ -608,7 +608,7 @@ func TestSubmitPinnedBatchOpFail(t *testing.T) { mp.mbi.On("Name").Return("ut") mp.mom.On("AddOrReuseOperation", ctx, mock.Anything).Return(fmt.Errorf("pop")) - err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1") + err := mp.SubmitBatchPin(ctx, batch, contexts, "payload1", false) assert.Regexp(t, "pop", err) } diff --git a/internal/multiparty/operations.go b/internal/multiparty/operations.go index d2267613d3..945e7aca27 100644 --- a/internal/multiparty/operations.go +++ b/internal/multiparty/operations.go @@ -22,6 +22,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" @@ -98,25 +99,25 @@ func (mm *multipartyManager) PrepareOperation(ctx context.Context, op *core.Oper } } -func (mm *multipartyManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { +func (mm *multipartyManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { switch data := op.Data.(type) { case txcommon.BatchPinData: batch := data.Batch contract := mm.namespace.Contracts.Active - return nil, false, mm.blockchain.SubmitBatchPin(ctx, op.NamespacedIDString(), batch.Namespace, batch.Key, &blockchain.BatchPin{ + err = mm.blockchain.SubmitBatchPin(ctx, op.NamespacedIDString(), batch.Namespace, batch.Key, &blockchain.BatchPin{ TransactionID: batch.TX.ID, BatchID: batch.ID, BatchHash: batch.Hash, BatchPayloadRef: data.PayloadRef, Contexts: data.Contexts, }, contract.Location) - + return nil, operations.ErrTernary(err, core.OpPhaseInitializing, core.OpPhasePending), err case networkActionData: contract := 
mm.namespace.Contracts.Active - return nil, false, mm.blockchain.SubmitNetworkAction(ctx, op.NamespacedIDString(), data.Key, data.Type, contract.Location) - + err = mm.blockchain.SubmitNetworkAction(ctx, op.NamespacedIDString(), data.Key, data.Type, contract.Location) + return nil, operations.ErrTernary(err, core.OpPhaseInitializing, core.OpPhasePending), err default: - return nil, false, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) + return nil, core.OpPhaseInitializing, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) } } diff --git a/internal/multiparty/operations_test.go b/internal/multiparty/operations_test.go index 79c075d518..b7ee65f099 100644 --- a/internal/multiparty/operations_test.go +++ b/internal/multiparty/operations_test.go @@ -59,9 +59,9 @@ func TestPrepareAndRunBatchPin(t *testing.T) { assert.NoError(t, err) assert.Equal(t, batch, po.Data.(txcommon.BatchPinData).Batch) - _, complete, err := mp.RunOperation(context.Background(), opBatchPin(op, batch, contexts, "payload1")) + _, phase, err := mp.RunOperation(context.Background(), opBatchPin(op, batch, contexts, "payload1")) - assert.False(t, complete) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) } @@ -82,9 +82,9 @@ func TestPrepareAndRunNetworkAction(t *testing.T) { assert.NoError(t, err) assert.Equal(t, core.NetworkActionTerminate, po.Data.(networkActionData).Type) - _, complete, err := mp.RunOperation(context.Background(), opNetworkAction(op, core.NetworkActionTerminate, "0x123")) + _, phase, err := mp.RunOperation(context.Background(), opNetworkAction(op, core.NetworkActionTerminate, "0x123")) - assert.False(t, complete) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) mp.mbi.AssertExpectations(t) @@ -133,9 +133,9 @@ func TestRunOperationNotSupported(t *testing.T) { mp := newTestMultipartyManager() defer mp.cleanup(t) - _, complete, err := mp.RunOperation(context.Background(), &core.PreparedOperation{}) + _, phase, err := mp.RunOperation(context.Background(), &core.PreparedOperation{}) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10378", err) } @@ -204,9 +204,9 @@ func TestRunBatchPinV1(t *testing.T) { mp.mbi.On("SubmitBatchPin", context.Background(), "ns1:"+op.ID.String(), "ns1", "0x123", mock.Anything, mock.Anything).Return(nil) - _, complete, err := mp.RunOperation(context.Background(), opBatchPin(op, batch, contexts, "payload1")) + _, phase, err := mp.RunOperation(context.Background(), opBatchPin(op, batch, contexts, "payload1")) - assert.False(t, complete) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) } diff --git a/internal/namespace/config.go b/internal/namespace/config.go index 98cfa70fda..16ff8ade6c 100644 --- a/internal/namespace/config.go +++ b/internal/namespace/config.go @@ -19,6 +19,7 @@ package namespace import ( "github.com/hyperledger/firefly-common/pkg/auth/authfactory" "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/fftls" "github.com/hyperledger/firefly/internal/blockchain/bifactory" "github.com/hyperledger/firefly/internal/coreconfig" "github.com/hyperledger/firefly/internal/database/difactory" @@ -71,6 +72,11 @@ func InitConfig() { contractConf.AddKnownKey(coreconfig.NamespaceMultipartyContractLocation) contractConf.AddKnownKey(coreconfig.NamespaceMultipartyContractOptions) + tlsConfigs := namespacePredefined.SubArray(coreconfig.NamespaceTLSConfigs) + 
tlsConfigs.AddKnownKey(coreconfig.NamespaceTLSConfigName) + tlsConf := tlsConfigs.SubSection(coreconfig.NamespaceTLSConfigTLSSection) + fftls.InitTLSConfig(tlsConf) + bifactory.InitConfig(blockchainConfig) difactory.InitConfig(databaseConfig) ssfactory.InitConfig(sharedstorageConfig) diff --git a/internal/namespace/configreload.go b/internal/namespace/configreload.go index cfa3e1ef48..486c111eca 100644 --- a/internal/namespace/configreload.go +++ b/internal/namespace/configreload.go @@ -25,6 +25,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/log" "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/internal/orchestrator" "github.com/spf13/viper" ) @@ -95,6 +96,15 @@ func (nm *namespaceManager) configReloaded(ctx context.Context) { // Stop all defunct plugins - now the namespaces using them are all stopped nm.stopDefunctPlugins(ctx, pluginsToStop) + // If there are any namespaces that are completely gone at this point we need to purge + // them from the system (all the handlers/callback registrations), before we update the + // namespace list and lose track of the old orchestrator + for oldNSName, oldNS := range nm.namespaces { + if _, stillExists := availableNS[oldNSName]; !stillExists && oldNS.orchestrator != nil { + orchestrator.Purge(ctx, &oldNS.Namespace, oldNS.plugins, oldNS.config.Multiparty.Node.Name) + } + } + // Update the new lists nm.plugins = availablePlugins nm.namespaces = availableNS diff --git a/internal/namespace/configreload_test.go b/internal/namespace/configreload_test.go index d97f041ae7..f10bd18696 100644 --- a/internal/namespace/configreload_test.go +++ b/internal/namespace/configreload_test.go @@ -21,6 +21,7 @@ import ( "fmt" "io/ioutil" "os" + "reflect" "strings" "sync" "testing" @@ -802,6 +803,23 @@ namespaces: } +func mockPurge(nmm *nmMocks, nsName string) { + matchNil := mock.MatchedBy(func(i interface{}) bool { + return i == nil || reflect.ValueOf(i).IsNil() + }) + nmm.mdi.On("SetHandler", nsName, matchNil).Return() + nmm.mbi.On("SetHandler", nsName, matchNil).Return() + nmm.mbi.On("SetOperationHandler", nsName, matchNil).Return() + nmm.mps.On("SetHandler", nsName, matchNil).Return().Maybe() + nmm.mps.On("SetOperationHandler", nsName, matchNil).Return().Maybe() + nmm.mdx.On("SetHandler", nsName, mock.Anything, matchNil).Return().Maybe() + nmm.mdx.On("SetOperationHandler", nsName, matchNil).Return().Maybe() + for _, mti := range nmm.mti { + mti.On("SetHandler", nsName, matchNil).Return().Maybe() + mti.On("SetOperationHandler", nsName, matchNil).Return().Maybe() + } +} + func TestConfigDownToNothingOk(t *testing.T) { logrus.SetLevel(logrus.TraceLevel) @@ -816,6 +834,10 @@ func TestConfigDownToNothingOk(t *testing.T) { defer cancelCtx() mockInitConfig(nmm) + + // Because the namespaces are deleted, we should get calls with nil on all the plugins + mockPurge(nmm, "ns1") + mockPurge(nmm, "ns2") waitInit := namespaceInitWaiter(t, nmm, []string{"ns1", "ns2"}) err = nm.Init(ctx, cancelCtx, make(chan bool), func() error { return nil }) @@ -863,6 +885,9 @@ func TestConfigStartPluginsFails(t *testing.T) { defer cancelCtx() mockInitConfig(nmm) + mockPurge(nmm, "ns1") + mockPurge(nmm, "ns2") + waitInit := namespaceInitWaiter(t, nmm, []string{"ns1", "ns2"}) err = nm.Init(ctx, cancelCtx, make(chan bool), func() error { return nil }) diff --git a/internal/namespace/manager.go b/internal/namespace/manager.go index b3b28a3547..36409a5742 100644 --- a/internal/namespace/manager.go +++ 
b/internal/namespace/manager.go @@ -18,6 +18,7 @@ package namespace import ( "context" + "crypto/tls" "fmt" "strconv" "sync" @@ -26,6 +27,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/auth" "github.com/hyperledger/firefly-common/pkg/auth/authfactory" "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/fftls" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly-common/pkg/log" @@ -298,7 +300,7 @@ func (nm *namespaceManager) findV1Contract(ns *namespace) *core.MultipartyContra // // Note that plugins have a separate lifecycle, independent from namespace orchestrators. func (nm *namespaceManager) namespaceStarter(ns *namespace) { - _ = nm.nsStartupRetry.Do(nm.ctx, fmt.Sprintf("namespace %s", ns.Name), func(attempt int) (retry bool, err error) { + _ = nm.nsStartupRetry.Do(ns.ctx, fmt.Sprintf("namespace %s", ns.Name), func(attempt int) (retry bool, err error) { startTime := time.Now() err = nm.initAndStartNamespace(ns) // If we started successfully, then all is good @@ -768,6 +770,34 @@ func (nm *namespaceManager) loadNamespaces(ctx context.Context, rawConfig fftype return newNS, err } +func (nm *namespaceManager) loadTLSConfig(ctx context.Context, tlsConfigs map[string]*tls.Config, conf config.ArraySection) (err error) { + tlsConfigArraySize := conf.ArraySize() + + for i := 0; i < tlsConfigArraySize; i++ { + entry := conf.ArrayEntry(i) + name := entry.GetString(coreconfig.NamespaceTLSConfigName) + tlsConf := entry.SubSection(coreconfig.NamespaceTLSConfigTLSSection) + + tlsConfig, err := fftls.ConstructTLSConfig(ctx, tlsConf, fftls.ClientType) + if err != nil { + return err + } + + if tlsConfig == nil { + // Config not enabled + continue + } + + if tlsConfigs[name] != nil { + return i18n.NewError(ctx, coremsgs.MsgDuplicateTLSConfig, name) + } + + tlsConfigs[name] = tlsConfig + } + + return nil +} + func (nm *namespaceManager) loadNamespace(ctx context.Context, name string, index int, conf config.Section, rawNSConfig fftypes.JSONObject, availablePlugins map[string]*plugin) (ns *namespace, err error) { if err := fftypes.ValidateFFNameField(ctx, name, fmt.Sprintf("namespaces.predefined[%d].name", index)); err != nil { return nil, err @@ -850,6 +880,15 @@ func (nm *namespaceManager) loadNamespace(ctx context.Context, name string, inde } } + // Handle TLS Configs + tlsConfigArray := conf.SubArray(coreconfig.NamespaceTLSConfigs) + tlsConfigs := make(map[string]*tls.Config) + + err = nm.loadTLSConfig(ctx, tlsConfigs, tlsConfigArray) + if err != nil { + return nil, err + } + config := orchestrator.Config{ DefaultKey: conf.GetString(coreconfig.NamespaceDefaultKey), TokenBroadcastNames: nm.tokenBroadcastNames, @@ -886,6 +925,7 @@ func (nm *namespaceManager) loadNamespace(ctx context.Context, name string, inde Name: name, NetworkName: networkName, Description: conf.GetString(coreconfig.NamespaceDescription), + TLSConfigs: tlsConfigs, }, loadTime: fftypes.Now(), config: config, diff --git a/internal/namespace/manager_test.go b/internal/namespace/manager_test.go index a639db704f..6dd09e002b 100644 --- a/internal/namespace/manager_test.go +++ b/internal/namespace/manager_test.go @@ -18,7 +18,16 @@ package namespace import ( "context" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" "fmt" + "log" + "math/big" + "net" "os" "strings" "testing" @@ -72,6 +81,7 @@ namespaces: predefined: - defaultKey: 
0xbEa50Ec98776beF144Fc63078e7b15291Ac64cfA name: default + tlsConfigs: [] plugins: - ethereum - postgres @@ -1305,6 +1315,206 @@ func TestLoadNamespacesUseDefaults(t *testing.T) { assert.Equal(t, "oldest", newNS["ns1"].config.Multiparty.Contracts[0].FirstEvent) } +func TestLoadTLSConfigsBadTLS(t *testing.T) { + nm, _, cleanup := newTestNamespaceManager(t, true) + defer cleanup() + + coreconfig.Reset() + viper.SetConfigType("yaml") + err := viper.ReadConfig(strings.NewReader(` +namespaces: + default: ns1 + predefined: + - name: ns1 + tlsConfigs: + - name: myconfig + tls: + enabled: true + caFile: my-ca + certFile: my-cert + keyFile: my-key + `)) + assert.NoError(t, err) + + // RawConfig to Section! + tlsConfigArray := namespacePredefined.ArrayEntry(0).SubArray(coreconfig.NamespaceTLSConfigs) + tlsConfigs := make(map[string]*tls.Config) + err = nm.loadTLSConfig(nm.ctx, tlsConfigs, tlsConfigArray) + assert.Regexp(t, "FF00153", err) +} + +func TestLoadTLSConfigsNotEnabled(t *testing.T) { + nm, _, cleanup := newTestNamespaceManager(t, true) + defer cleanup() + + coreconfig.Reset() + viper.SetConfigType("yaml") + err := viper.ReadConfig(strings.NewReader(` +namespaces: + default: ns1 + predefined: + - name: ns1 + tlsConfigs: + - name: myconfig + tls: + enabled: false + caFile: my-ca + certFile: my-cert + keyFile: my-key + `)) + assert.NoError(t, err) + + // RawConfig to Section! + tlsConfigArray := namespacePredefined.ArrayEntry(0).SubArray(coreconfig.NamespaceTLSConfigs) + tlsConfigs := make(map[string]*tls.Config) + err = nm.loadTLSConfig(nm.ctx, tlsConfigs, tlsConfigArray) + assert.NoError(t, err) + assert.Nil(t, tlsConfigs["myconfig"]) +} + +func generateTestCertificates() (*os.File, *os.File, func()) { + // Create an X509 certificate pair + privatekey, _ := rsa.GenerateKey(rand.Reader, 2048) + publickey := &privatekey.PublicKey + var privateKeyBytes []byte = x509.MarshalPKCS1PrivateKey(privatekey) + privateKeyFile, _ := os.CreateTemp("", "key.pem") + privateKeyBlock := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: privateKeyBytes} + pem.Encode(privateKeyFile, privateKeyBlock) + serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) + x509Template := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Unit Tests"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(1000 * time.Second), + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)}, + } + derBytes, _ := x509.CreateCertificate(rand.Reader, x509Template, x509Template, publickey, privatekey) + publicKeyFile, _ := os.CreateTemp("", "cert.pem") + pem.Encode(publicKeyFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + + return publicKeyFile, privateKeyFile, func() { + os.Remove(publicKeyFile.Name()) + os.Remove(privateKeyFile.Name()) + } +} + +func TestLoadTLSConfigs(t *testing.T) { + nm, _, cleanup := newTestNamespaceManager(t, true) + defer cleanup() + + publicKeyFile, privateKeyFile, cleanCertificates := generateTestCertificates() + defer cleanCertificates() + + caCert, err := os.ReadFile(publicKeyFile.Name()) + if err != nil { + log.Fatal(err) + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + cert, err := tls.LoadX509KeyPair(publicKeyFile.Name(), privateKeyFile.Name()) + assert.NoError(t, err) + expectedTLSConfig := &tls.Config{ + RootCAs: caCertPool, + Certificates: 
[]tls.Certificate{cert}, + } + + coreconfig.Reset() + viper.SetConfigType("yaml") + err = viper.ReadConfig(strings.NewReader(fmt.Sprintf(` +namespaces: + default: ns1 + predefined: + - name: ns1 + tlsConfigs: + - name: myconfig + tls: + enabled: true + caFile: %s + certFile: %s + keyFile: %s + `, publicKeyFile.Name(), publicKeyFile.Name(), privateKeyFile.Name()))) + assert.NoError(t, err) + + // RawConfig to Section! + tlsConfigArray := namespacePredefined.ArrayEntry(0).SubArray(coreconfig.NamespaceTLSConfigs) + tlsConfigs := make(map[string]*tls.Config) + err = nm.loadTLSConfig(nm.ctx, tlsConfigs, tlsConfigArray) + assert.NoError(t, err) + assert.NotNil(t, tlsConfigs["myconfig"]) + assert.True(t, tlsConfigs["myconfig"].RootCAs.Equal(expectedTLSConfig.RootCAs)) + assert.Equal(t, tlsConfigs["myconfig"].Certificates, expectedTLSConfig.Certificates) +} + +func TestLoadTLSConfigsDuplicateConfigs(t *testing.T) { + nm, _, cleanup := newTestNamespaceManager(t, true) + defer cleanup() + + publicKeyFile, privateKeyFile, cleanCertificates := generateTestCertificates() + defer cleanCertificates() + + coreconfig.Reset() + viper.SetConfigType("yaml") + err := viper.ReadConfig(strings.NewReader(fmt.Sprintf(` +namespaces: + default: ns1 + predefined: + - name: ns1 + tlsConfigs: + - name: myconfig + tls: + enabled: true + caFile: %s + certFile: %s + keyFile: %s + - name: myconfig + tls: + enabled: true + caFile: %s + certFile: %s + keyFile: %s + `, publicKeyFile.Name(), publicKeyFile.Name(), privateKeyFile.Name(), + publicKeyFile.Name(), publicKeyFile.Name(), privateKeyFile.Name()))) + assert.NoError(t, err) + + // RawConfig to Section! + tlsConfigArray := namespacePredefined.ArrayEntry(0).SubArray(coreconfig.NamespaceTLSConfigs) + tlsConfigs := make(map[string]*tls.Config) + err = nm.loadTLSConfig(nm.ctx, tlsConfigs, tlsConfigArray) + assert.Regexp(t, "FF10454", err) +} + +func TestLoadNamespacesWithErrorTLSConfigs(t *testing.T) { + nm, _, cleanup := newTestNamespaceManager(t, true) + defer cleanup() + + coreconfig.Reset() + viper.SetConfigType("yaml") + err := viper.ReadConfig(strings.NewReader(` +namespaces: + default: ns1 + predefined: + - name: ns1 + tlsConfigs: + - name: myconfig + tls: + enabled: true + caFile: my-ca + certFile: my-cert + keyFile: my-key + `)) + assert.NoError(t, err) + + nm.namespaces, err = nm.loadNamespaces(context.Background(), nm.dumpRootConfig(), nm.plugins) + + assert.Regexp(t, "FF00153", err) +} + func TestLoadNamespacesNonMultipartyNoDatabase(t *testing.T) { nm, _, cleanup := newTestNamespaceManager(t, true) defer cleanup() diff --git a/internal/networkmap/did.go b/internal/networkmap/did.go index 8ecab752a5..af1244ccab 100644 --- a/internal/networkmap/did.go +++ b/internal/networkmap/did.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
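The namespace manager changes above add a per-namespace tlsConfigs array, loaded into a map[string]*tls.Config, skipping disabled entries and rejecting duplicate names (the FF10454 case in the tests). A standalone sketch of that load-and-dedupe shape, with plain structs standing in for the fftls/config sections; field and function names here are assumptions for illustration:

package main

import (
	"crypto/tls"
	"fmt"
)

// tlsConfigEntry stands in for one namespaces.predefined[].tlsConfigs[] entry.
type tlsConfigEntry struct {
	Name    string
	Enabled bool
	Config  *tls.Config // what constructing the TLS section would yield when enabled
}

// loadTLSConfigs builds the named map, skipping disabled entries and
// rejecting duplicate names, mirroring the loadTLSConfig flow in the diff.
func loadTLSConfigs(entries []tlsConfigEntry) (map[string]*tls.Config, error) {
	out := make(map[string]*tls.Config)
	for _, e := range entries {
		if !e.Enabled {
			continue
		}
		if _, exists := out[e.Name]; exists {
			return nil, fmt.Errorf("duplicate TLS config name %q", e.Name)
		}
		out[e.Name] = e.Config
	}
	return out, nil
}

func main() {
	cfgs, err := loadTLSConfigs([]tlsConfigEntry{
		{Name: "myconfig", Enabled: true, Config: &tls.Config{MinVersion: tls.VersionTLS12}},
		{Name: "disabled", Enabled: false},
	})
	fmt.Println(len(cfgs), err) // 1 <nil>
}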
// // SPDX-License-Identifier: Apache-2.0 // @@ -77,6 +77,8 @@ func (nm *networkMap) generateDIDAuthentication(ctx context.Context, identity *c switch verifier.Type { case core.VerifierTypeEthAddress: return nm.generateEthAddressVerifier(identity, verifier) + case core.VerifierTypeTezosAddress: + return nm.generateTezosAddressVerifier(identity, verifier) case core.VerifierTypeMSPIdentity: return nm.generateMSPVerifier(identity, verifier) case core.VerifierTypeFFDXPeerID: @@ -96,6 +98,15 @@ func (nm *networkMap) generateEthAddressVerifier(identity *core.Identity, verifi } } +func (nm *networkMap) generateTezosAddressVerifier(identity *core.Identity, verifier *core.Verifier) *VerificationMethod { + return &VerificationMethod{ + ID: verifier.Hash.String(), + Type: "Ed25519VerificationKey2020", + Controller: identity.DID, + BlockchainAccountID: verifier.Value, + } +} + func (nm *networkMap) generateMSPVerifier(identity *core.Identity, verifier *core.Verifier) *VerificationMethod { return &VerificationMethod{ ID: verifier.Hash.String(), diff --git a/internal/networkmap/did_test.go b/internal/networkmap/did_test.go index ad02423265..2040861eaf 100644 --- a/internal/networkmap/did_test.go +++ b/internal/networkmap/did_test.go @@ -43,6 +43,15 @@ func TestDIDGenerationOK(t *testing.T) { }, Created: fftypes.Now(), }).Seal() + verifierTezos := (&core.Verifier{ + Identity: org1.ID, + Namespace: org1.Namespace, + VerifierRef: core.VerifierRef{ + Type: core.VerifierTypeTezosAddress, + Value: "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN", + }, + Created: fftypes.Now(), + }).Seal() verifierMSP := (&core.Verifier{ Identity: org1.ID, Namespace: org1.Namespace, @@ -75,6 +84,7 @@ func TestDIDGenerationOK(t *testing.T) { mdi.On("GetIdentityByID", nm.ctx, "ns1", mock.Anything).Return(org1, nil) mdi.On("GetVerifiers", nm.ctx, "ns1", mock.Anything).Return([]*core.Verifier{ verifierEth, + verifierTezos, verifierMSP, verifierDX, verifierUnknown, @@ -95,6 +105,12 @@ func TestDIDGenerationOK(t *testing.T) { Controller: org1.DID, BlockchainAccountID: verifierEth.Value, }, + { + ID: verifierTezos.Hash.String(), + Type: "Ed25519VerificationKey2020", + Controller: org1.DID, + BlockchainAccountID: verifierTezos.Value, + }, { ID: verifierMSP.Hash.String(), Type: "HyperledgerFabricMSPIdentity", @@ -110,6 +126,7 @@ func TestDIDGenerationOK(t *testing.T) { }, Authentication: []string{ fmt.Sprintf("#%s", verifierEth.Hash.String()), + fmt.Sprintf("#%s", verifierTezos.Hash.String()), fmt.Sprintf("#%s", verifierMSP.Hash.String()), fmt.Sprintf("#%s", verifierDX.Hash.String()), }, diff --git a/internal/networkmap/register_identity.go b/internal/networkmap/register_identity.go index 43a4c218be..3fef6c74f6 100644 --- a/internal/networkmap/register_identity.go +++ b/internal/networkmap/register_identity.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
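With Tezos addresses now recognised as a verifier type, the DID document generator above emits an Ed25519VerificationKey2020 entry alongside the existing Ethereum and MSP forms. A minimal sketch of what such a verification method could look like when serialised; the struct and JSON field names are illustrative, based on the DID-document shape visible in the diff rather than the exact FireFly types:

package main

import (
	"encoding/json"
	"fmt"
)

// verificationMethod is a cut-down DID document entry, matching the fields
// populated by the Tezos verifier branch above.
type verificationMethod struct {
	ID                  string `json:"id"`
	Type                string `json:"type"`
	Controller          string `json:"controller"`
	BlockchainAccountID string `json:"blockchainAccountId,omitempty"`
}

func main() {
	vm := verificationMethod{
		ID:                  "hash-of-the-verifier", // placeholder for the verifier hash
		Type:                "Ed25519VerificationKey2020",
		Controller:          "did:firefly:org/org1",
		BlockchainAccountID: "tz1Y6GnVhC4EpcDDSmD3ibcC4WX6DJ4Q1QLN",
	}
	b, _ := json.MarshalIndent(vm, "", "  ")
	fmt.Println(string(b))
}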
// // SPDX-License-Identifier: Apache-2.0 // @@ -111,5 +111,5 @@ func (nm *networkMap) RegisterIdentity(ctx context.Context, dto *core.IdentityCr } func (nm *networkMap) sendIdentityRequest(ctx context.Context, identity *core.Identity, claimSigner *core.SignerRef, parentSigner *core.SignerRef) error { - return nm.defsender.ClaimIdentity(ctx, &core.IdentityClaim{Identity: identity}, claimSigner, parentSigner, false) + return nm.defsender.ClaimIdentity(ctx, &core.IdentityClaim{Identity: identity}, claimSigner, parentSigner) } diff --git a/internal/networkmap/register_identity_test.go b/internal/networkmap/register_identity_test.go index fe2a5b9f1f..c75b23fabc 100644 --- a/internal/networkmap/register_identity_test.go +++ b/internal/networkmap/register_identity_test.go @@ -54,7 +54,7 @@ func TestRegisterIdentityOrgWithParentOk(t *testing.T) { mock.MatchedBy(func(sr *core.SignerRef) bool { return sr.Key == "0x23456" }), - false).Return(nil) + ).Return(nil) org, err := nm.RegisterIdentity(nm.ctx, &core.IdentityCreateDTO{ Name: "child1", @@ -101,7 +101,7 @@ func TestRegisterIdentityOrgWithParentWaitConfirmOk(t *testing.T) { mock.MatchedBy(func(sr *core.SignerRef) bool { return sr.Key == "0x23456" }), - false).Return(nil) + ).Return(nil) _, err := nm.RegisterIdentity(nm.ctx, &core.IdentityCreateDTO{ Name: "child1", @@ -139,7 +139,7 @@ func TestRegisterIdentityOrgNonMultiparty(t *testing.T) { return sr.Key == "0x12345" }), (*core.SignerRef)(nil), - false).Return(fmt.Errorf("pop")) + ).Return(fmt.Errorf("pop")) _, err := nm.RegisterIdentity(nm.ctx, &core.IdentityCreateDTO{ Name: "custom1", @@ -175,7 +175,7 @@ func TestRegisterIdentityCustomWithParentFail(t *testing.T) { mock.MatchedBy(func(sr *core.SignerRef) bool { return sr.Key == "0x23456" }), - false).Return(nil) + ).Return(nil) org, err := nm.RegisterIdentity(nm.ctx, &core.IdentityCreateDTO{ Name: "child1", diff --git a/internal/networkmap/register_node.go b/internal/networkmap/register_node.go index 7a3d3f23b6..3b5a0b6978 100644 --- a/internal/networkmap/register_node.go +++ b/internal/networkmap/register_node.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -26,7 +26,7 @@ import ( func (nm *networkMap) RegisterNode(ctx context.Context, waitConfirm bool) (identity *core.Identity, err error) { - nodeOwningOrg, err := nm.identity.GetMultipartyRootOrg(ctx) + nodeOwningOrg, err := nm.identity.GetRootOrg(ctx) if err != nil { return nil, err } diff --git a/internal/networkmap/register_node_test.go b/internal/networkmap/register_node_test.go index 4b499210c9..5f1e2fd1a9 100644 --- a/internal/networkmap/register_node_test.go +++ b/internal/networkmap/register_node_test.go @@ -40,7 +40,7 @@ func TestRegisterNodeOk(t *testing.T) { signerRef := &core.SignerRef{Key: "0x23456"} mim := nm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", nm.ctx).Return(parentOrg, nil) + mim.On("GetRootOrg", nm.ctx).Return(parentOrg, nil) mim.On("VerifyIdentityChain", nm.ctx, mock.AnythingOfType("*core.Identity")).Return(parentOrg, false, nil) mim.On("ResolveIdentitySigner", nm.ctx, parentOrg).Return(signerRef, nil) @@ -55,7 +55,7 @@ func TestRegisterNodeOk(t *testing.T) { mock.AnythingOfType("*core.IdentityClaim"), signerRef, (*core.SignerRef)(nil), - false).Return(nil) + ).Return(nil) mmp := nm.multiparty.(*multipartymocks.Manager) mmp.On("LocalNode").Return(multiparty.LocalNode{Name: "node1"}) @@ -78,7 +78,7 @@ func TestRegisterNodeMissingName(t *testing.T) { parentOrg := testOrg("org1") mim := nm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", nm.ctx).Return(parentOrg, nil) + mim.On("GetRootOrg", nm.ctx).Return(parentOrg, nil) mmp := nm.multiparty.(*multipartymocks.Manager) mmp.On("LocalNode").Return(multiparty.LocalNode{}) @@ -98,7 +98,7 @@ func TestRegisterNodePeerInfoFail(t *testing.T) { parentOrg := testOrg("org1") mim := nm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", nm.ctx).Return(parentOrg, nil) + mim.On("GetRootOrg", nm.ctx).Return(parentOrg, nil) mdx := nm.exchange.(*dataexchangemocks.Plugin) mdx.On("GetEndpointInfo", nm.ctx, "node1").Return(fftypes.JSONObject{}, fmt.Errorf("pop")) @@ -120,7 +120,7 @@ func TestRegisterNodeGetOwnerFail(t *testing.T) { defer cancel() mim := nm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", nm.ctx).Return(nil, fmt.Errorf("pop")) + mim.On("GetRootOrg", nm.ctx).Return(nil, fmt.Errorf("pop")) _, err := nm.RegisterNode(nm.ctx, false) assert.Regexp(t, "pop", err) diff --git a/internal/networkmap/register_org.go b/internal/networkmap/register_org.go index 0288e68a9c..0f307f4dd8 100644 --- a/internal/networkmap/register_org.go +++ b/internal/networkmap/register_org.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -27,7 +27,7 @@ import ( // RegisterNodeOrganization is a convenience helper to register the org configured on the node, without any extra info func (nm *networkMap) RegisterNodeOrganization(ctx context.Context, waitConfirm bool) (*core.Identity, error) { - key, err := nm.identity.GetMultipartyRootVerifier(ctx) + key, err := nm.identity.ResolveMultipartyRootVerifier(ctx) if err != nil { return nil, err } diff --git a/internal/networkmap/register_org_test.go b/internal/networkmap/register_org_test.go index ae4ed84c5b..fdc84ca720 100644 --- a/internal/networkmap/register_org_test.go +++ b/internal/networkmap/register_org_test.go @@ -59,7 +59,7 @@ func TestRegisterNodeOrgOk(t *testing.T) { defer cancel() mim := nm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootVerifier", nm.ctx).Return(&core.VerifierRef{ + mim.On("ResolveMultipartyRootVerifier", nm.ctx).Return(&core.VerifierRef{ Value: "0x12345", }, nil) mim.On("VerifyIdentityChain", nm.ctx, mock.AnythingOfType("*core.Identity")).Return(nil, false, nil) @@ -74,7 +74,7 @@ func TestRegisterNodeOrgOk(t *testing.T) { return sr.Key == "0x12345" }), (*core.SignerRef)(nil), - false).Return(nil) + ).Return(nil) org, err := nm.RegisterNodeOrganization(nm.ctx, false) assert.NoError(t, err) @@ -91,7 +91,7 @@ func TestRegisterNodeOrgNoName(t *testing.T) { defer cancel() mim := nm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootVerifier", nm.ctx).Return(&core.VerifierRef{ + mim.On("ResolveMultipartyRootVerifier", nm.ctx).Return(&core.VerifierRef{ Value: "0x12345", }, nil) @@ -111,7 +111,7 @@ func TestRegisterNodeGetOwnerBlockchainKeyFail(t *testing.T) { defer cancel() mim := nm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootVerifier", nm.ctx).Return(nil, fmt.Errorf("pop")) + mim.On("ResolveMultipartyRootVerifier", nm.ctx).Return(nil, fmt.Errorf("pop")) _, err := nm.RegisterNodeOrganization(nm.ctx, false) assert.Regexp(t, "pop", err) diff --git a/internal/operations/context.go b/internal/operations/context.go index 7da12bb038..d31a05f70e 100644 --- a/internal/operations/context.go +++ b/internal/operations/context.go @@ -92,3 +92,19 @@ func (om *operationsManager) AddOrReuseOperation(ctx context.Context, op *core.O } return err } + +func (om *operationsManager) BulkInsertOperations(ctx context.Context, ops ...*core.Operation) error { + // This efficiently inserts the operations. + // It's all-or-nothing success/failure, as ops individually don't have idempotency duplicates to + // worry about - that's handled by the wrapping transaction layer. + // + // Thin wrapper on the database that also manages the cache. Expected to be run on a batch worker setting + // up idempotent transactions, not in the context of an individual operation. + if err := om.database.InsertOperations(ctx, ops); err != nil { + return err + } + for _, op := range ops { + om.cacheOperation(op) + } + return nil +} diff --git a/internal/operations/context_test.go b/internal/operations/context_test.go index 2f3856acee..122c0b82d9 100644 --- a/internal/operations/context_test.go +++ b/internal/operations/context_test.go @@ -6,7 +6,7 @@ // you may not use this file except in compliance with the License.
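The BulkInsertOperations helper added above deliberately skips the per-operation idempotency handling of AddOrReuseOperation and relies on the caller's wrapping database transaction instead. A minimal sketch of how a batch worker preparing an idempotent transaction might use it (not part of this diff; the function name, namespace, and operation values are illustrative, and it assumes it lives inside the FireFly module so the internal operations package is importable):

package example // illustrative sketch only

import (
	"context"

	"github.com/hyperledger/firefly-common/pkg/fftypes"
	"github.com/hyperledger/firefly/internal/operations"
	"github.com/hyperledger/firefly/pkg/core"
	"github.com/hyperledger/firefly/pkg/database"
)

// insertTransactionOps writes all operations for one transaction in a single database group,
// so either every operation row lands or none do, and the operations cache is primed for each.
func insertTransactionOps(ctx context.Context, di database.Plugin, om operations.Manager, txID *fftypes.UUID) error {
	ops := []*core.Operation{
		{ID: fftypes.NewUUID(), Namespace: "ns1", Transaction: txID, Type: core.OpTypeBlockchainPinBatch, Status: core.OpStatusInitialized},
		{ID: fftypes.NewUUID(), Namespace: "ns1", Transaction: txID, Type: core.OpTypeBlockchainPinBatch, Status: core.OpStatusInitialized},
	}
	return di.RunAsGroup(ctx, func(ctx context.Context) error {
		// All-or-nothing: a failure here rolls back the whole group rather than
		// leaving a partial set of operations behind.
		return om.BulkInsertOperations(ctx, ops...)
	})
}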
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -137,3 +137,55 @@ func TestGetContextKeyBadJSON(t *testing.T) { _, err := getContextKey(op) assert.Error(t, err) } + +func TestBulkInsertOperationsOk(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + op1 := &core.Operation{ + ID: fftypes.NewUUID(), + Type: core.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "1"}, + Status: core.OpStatusFailed, + } + op2 := &core.Operation{ + ID: fftypes.NewUUID(), + Type: core.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "1"}, + Status: core.OpStatusPending, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("InsertOperations", ctx, []*core.Operation{op1, op2}).Return(nil).Once() + + err := om.BulkInsertOperations(ctx, op1, op2) + assert.NoError(t, err) + +} + +func TestBulkInsertOperationsFail(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + op1 := &core.Operation{ + ID: fftypes.NewUUID(), + Type: core.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "1"}, + Status: core.OpStatusFailed, + } + op2 := &core.Operation{ + ID: fftypes.NewUUID(), + Type: core.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "1"}, + Status: core.OpStatusPending, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("InsertOperations", ctx, []*core.Operation{op1, op2}).Return(fmt.Errorf("pop")).Once() + + err := om.BulkInsertOperations(ctx, op1, op2) + assert.Regexp(t, "pop", err) + +} diff --git a/internal/operations/manager.go b/internal/operations/manager.go index ad0e611fe6..65c3d0f8ef 100644 --- a/internal/operations/manager.go +++ b/internal/operations/manager.go @@ -35,17 +35,18 @@ import ( type OperationHandler interface { core.Named PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) - RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) + RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) OnOperationUpdate(ctx context.Context, op *core.Operation, update *core.OperationUpdate) error } type Manager interface { RegisterHandler(ctx context.Context, handler OperationHandler, ops []core.OpType) PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) - RunOperation(ctx context.Context, op *core.PreparedOperation, options ...RunOperationOption) (fftypes.JSONObject, error) + RunOperation(ctx context.Context, op *core.PreparedOperation, idempotentSubmit bool) (fftypes.JSONObject, error) RetryOperation(ctx context.Context, opID *fftypes.UUID) (*core.Operation, error) - ResubmitOperations(ctx context.Context, txID *fftypes.UUID) (*core.Operation, error) + ResubmitOperations(ctx context.Context, txID *fftypes.UUID) (total int, resubmit []*core.Operation, err error) AddOrReuseOperation(ctx context.Context, op *core.Operation, hooks ...database.PostCompletionHook) error + BulkInsertOperations(ctx context.Context, ops ...*core.Operation) error SubmitOperationUpdate(update *core.OperationUpdate) GetOperationByIDCached(ctx context.Context, opID *fftypes.UUID) (*core.Operation, error) ResolveOperationByID(ctx 
context.Context, opID *fftypes.UUID, op *core.OperationUpdateDTO) error @@ -53,17 +54,25 @@ type Manager interface { WaitStop() } -type RunOperationOption int +// ConflictError can be implemented by connectors to prevent an operation being overridden to failed +type ConflictError interface { + IsConflictError() bool +} -const ( - RemainPendingOnFailure RunOperationOption = iota -) +func ErrTernary(err error, ifErr, ifNoError core.OpPhase) core.OpPhase { + phase := ifErr + if err == nil { + phase = ifNoError + } + return phase +} type operationsManager struct { ctx context.Context namespace string database database.Plugin handlers map[core.OpType]OperationHandler + txHelper txcommon.Helper updater *operationUpdater cache cache.CInterface } @@ -90,6 +99,7 @@ func NewOperationsManager(ctx context.Context, ns string, di database.Plugin, tx ctx: ctx, namespace: ns, database: di, + txHelper: txHelper, handlers: make(map[core.OpType]OperationHandler), } om.updater = newOperationUpdater(ctx, om, di, txHelper) @@ -112,46 +122,77 @@ func (om *operationsManager) PrepareOperation(ctx context.Context, op *core.Oper return handler.PrepareOperation(ctx, op) } -func (om *operationsManager) ResubmitOperations(ctx context.Context, txID *fftypes.UUID) (*core.Operation, error) { +func (om *operationsManager) ResubmitOperations(ctx context.Context, txID *fftypes.UUID) (int, []*core.Operation, error) { var resubmitErr error - var operation *core.Operation fb := database.OperationQueryFactory.NewFilter(ctx) filter := fb.And( fb.Eq("tx", txID), - fb.Eq("status", core.OpStatusInitialized), ) - initializedOperations, _, opErr := om.database.GetOperations(ctx, om.namespace, filter) + allOperations, _, opErr := om.database.GetOperations(ctx, om.namespace, filter) if opErr != nil { // Couldn't query operations. Log and return the original error log.L(ctx).Errorf("Failed to lookup initialized operations for TX %v: %v", txID, opErr) - return nil, opErr + return -1, nil, opErr } - for _, nextInitializedOp := range initializedOperations { - operation = nextInitializedOp - prepOp, _ := om.PrepareOperation(ctx, nextInitializedOp) - _, resubmitErr = om.RunOperation(ctx, prepOp) + initializedOperations := make([]*core.Operation, 0, len(allOperations)) + for _, op := range allOperations { + if op.Status == core.OpStatusInitialized { + initializedOperations = append(initializedOperations, op) + } } - return operation, resubmitErr -} -func (om *operationsManager) RunOperation(ctx context.Context, op *core.PreparedOperation, options ...RunOperationOption) (fftypes.JSONObject, error) { - failState := core.OpStatusFailed - for _, o := range options { - if o == RemainPendingOnFailure { - failState = core.OpStatusPending + resubmitted := []*core.Operation{} + for _, nextInitializedOp := range initializedOperations { + // Check the cache to cover the window while we're flushing an update to storage in the workers + cachedOp := om.getCachedOperation(nextInitializedOp.ID) + if cachedOp != nil && cachedOp.Status != core.OpStatusInitialized { + log.L(ctx).Debugf("Skipping re-submission of operation %s with un-flushed storage update in cache. 
Cached status=%s", nextInitializedOp.ID, cachedOp.Status) + continue } + prepOp, _ := om.PrepareOperation(ctx, nextInitializedOp) + _, resubmitErr = om.RunOperation(ctx, prepOp, true /* we only call ResubmitOperations in idempotent submit cases */) + if resubmitErr != nil { + break + } + log.L(ctx).Infof("%d operation resubmitted as part of idempotent retry of TX %s", nextInitializedOp.ID, txID) + resubmitted = append(resubmitted, nextInitializedOp) } + return len(allOperations), resubmitted, resubmitErr +} +func (om *operationsManager) RunOperation(ctx context.Context, op *core.PreparedOperation, idempotentSubmit bool) (fftypes.JSONObject, error) { handler, ok := om.handlers[op.Type] if !ok { return nil, i18n.NewError(ctx, coremsgs.MsgOperationNotSupported, op.Type) } log.L(ctx).Infof("Executing %s operation %s via handler %s", op.Type, op.ID, handler.Name()) log.L(ctx).Tracef("Operation detail: %+v", op) - outputs, complete, err := handler.RunOperation(ctx, op) + outputs, phase, err := handler.RunOperation(ctx, op) if err != nil { + conflictErr, conflictTestOk := err.(ConflictError) + var failState core.OpStatus + switch { + case conflictTestOk && conflictErr.IsConflictError(): + // We are now pending - we know the connector has the action we're attempting to submit + // + // The async processing in SubmitOperationUpdate does not allow us to go back to pending, if + // we have progressed to failed through an async event that gets ordered before this update. + // So this is safe + failState = core.OpStatusPending + log.L(ctx).Infof("Setting operation %s operation %s status to %s after conflict", op.Type, op.ID, failState) + case phase == core.OpPhaseInitializing && idempotentSubmit: + // We haven't submitted the operation yet - so we will reuse the operation if the user retires with the same idempotency key + failState = core.OpStatusInitialized + case phase == core.OpPhasePending: + // This error is past the point we have submitted to the connector - idempotency error from here on in on resubmit. + // This also implies we are continuing to progress the transaction, and expecting it to update through events. + failState = core.OpStatusPending + default: + // Ok, we're failed + failState = core.OpStatusFailed + } om.SubmitOperationUpdate(&core.OperationUpdate{ NamespacedOpID: op.NamespacedIDString(), Plugin: op.Plugin, @@ -163,7 +204,7 @@ func (om *operationsManager) RunOperation(ctx context.Context, op *core.Prepared // No error so move us from "Initialized" to "Pending" newState := core.OpStatusPending - if complete { + if phase == core.OpPhaseComplete { // If the operation is actually completed synchronously skip "Pending" state and go to "Succeeded" newState = core.OpStatusSucceeded } @@ -191,12 +232,21 @@ func (om *operationsManager) findLatestRetry(ctx context.Context, opID *fftypes. 
func (om *operationsManager) RetryOperation(ctx context.Context, opID *fftypes.UUID) (op *core.Operation, err error) { var po *core.PreparedOperation + var idempotencyKey core.IdempotencyKey err = om.database.RunAsGroup(ctx, func(ctx context.Context) error { op, err = om.findLatestRetry(ctx, opID) if err != nil { return err } + tx, err := om.updater.txHelper.GetTransactionByIDCached(ctx, op.Transaction) + if err != nil { + return err + } + if tx != nil { + idempotencyKey = tx.IdempotencyKey + } + // Create a copy of the operation with a new ID op.ID = fftypes.NewUUID() op.Status = core.OpStatusInitialized @@ -223,7 +273,8 @@ func (om *operationsManager) RetryOperation(ctx context.Context, opID *fftypes.U return nil, err } - _, err = om.RunOperation(ctx, po) + log.L(ctx).Debugf("Retry initiation for operation %s idempotencyKey=%s", po.NamespacedIDString(), idempotencyKey) + _, err = om.RunOperation(ctx, po, idempotencyKey != "") return op, err } diff --git a/internal/operations/manager_test.go b/internal/operations/manager_test.go index 498777e47d..4e5448c01e 100644 --- a/internal/operations/manager_test.go +++ b/internal/operations/manager_test.go @@ -38,23 +38,36 @@ import ( ) type mockHandler struct { - Complete bool + Phase core.OpPhase + PrepErr error RunErr error Prepared *core.PreparedOperation Outputs fftypes.JSONObject UpdateErr error } +type mockConflictErr struct { + err error +} + +func (ce *mockConflictErr) Error() string { + return ce.err.Error() +} + +func (ce *mockConflictErr) IsConflictError() bool { + return true +} + func (m *mockHandler) Name() string { return "MockHandler" } func (m *mockHandler) PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) { - return m.Prepared, m.RunErr + return m.Prepared, m.PrepErr } -func (m *mockHandler) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { - return m.Outputs, m.Complete, m.RunErr +func (m *mockHandler) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { + return m.Outputs, m.Phase, m.RunErr } func (m *mockHandler) OnOperationUpdate(ctx context.Context, op *core.Operation, update *core.OperationUpdate) error { @@ -149,7 +162,7 @@ func TestRunOperationNotSupported(t *testing.T) { op := &core.PreparedOperation{} - _, err := om.RunOperation(context.Background(), op) + _, err := om.RunOperation(context.Background(), op, true) assert.Regexp(t, "FF10371", err) } @@ -163,7 +176,7 @@ func TestRunOperationSuccess(t *testing.T) { } om.RegisterHandler(ctx, &mockHandler{Outputs: fftypes.JSONObject{"test": "output"}}, []core.OpType{core.OpTypeBlockchainPinBatch}) - outputs, err := om.RunOperation(context.Background(), op) + outputs, err := om.RunOperation(context.Background(), op, true) assert.Equal(t, "output", outputs.GetString("test")) assert.NoError(t, err) @@ -185,20 +198,19 @@ func TestRunOperationSyncSuccess(t *testing.T) { Type: core.OpTypeBlockchainPinBatch, } - om.RegisterHandler(ctx, &mockHandler{Complete: true}, []core.OpType{core.OpTypeBlockchainPinBatch}) - _, err := om.RunOperation(ctx, op) + om.RegisterHandler(ctx, &mockHandler{Phase: core.OpPhaseComplete}, []core.OpType{core.OpTypeBlockchainPinBatch}) + _, err := om.RunOperation(ctx, op, true) assert.NoError(t, err) } -func TestRunOperationFail(t *testing.T) { +func TestRunOperationFailIdempotentInit(t *testing.T) { om, cancel := newTestOperations(t) defer cancel() om.updater.workQueues = []chan 
*core.OperationUpdate{ - make(chan *core.OperationUpdate), + make(chan *core.OperationUpdate, 1), } - om.updater.cancelFunc() ctx := context.Background() op := &core.PreparedOperation{ @@ -207,8 +219,68 @@ func TestRunOperationFail(t *testing.T) { Type: core.OpTypeBlockchainPinBatch, } - om.RegisterHandler(ctx, &mockHandler{RunErr: fmt.Errorf("pop")}, []core.OpType{core.OpTypeBlockchainPinBatch}) - _, err := om.RunOperation(ctx, op) + om.RegisterHandler(ctx, &mockHandler{ + RunErr: fmt.Errorf("pop"), + Phase: core.OpPhaseInitializing, + }, []core.OpType{core.OpTypeBlockchainPinBatch}) + _, err := om.RunOperation(ctx, op, true) + + update := <-om.updater.workQueues[0] + assert.Equal(t, "ns1:"+op.ID.String(), update.NamespacedOpID) + assert.Equal(t, core.OpStatusInitialized, update.Status) + + assert.EqualError(t, err, "pop") +} + +func TestRunOperationFailNonIdempotentInit(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + om.updater.workQueues = []chan *core.OperationUpdate{ + make(chan *core.OperationUpdate, 1), + } + + ctx := context.Background() + op := &core.PreparedOperation{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Type: core.OpTypeBlockchainPinBatch, + } + + om.RegisterHandler(ctx, &mockHandler{ + RunErr: fmt.Errorf("pop"), + Phase: core.OpPhaseInitializing, + }, []core.OpType{core.OpTypeBlockchainPinBatch}) + _, err := om.RunOperation(ctx, op, false) + + update := <-om.updater.workQueues[0] + assert.Equal(t, "ns1:"+op.ID.String(), update.NamespacedOpID) + assert.Equal(t, core.OpStatusFailed, update.Status) + + assert.EqualError(t, err, "pop") +} + +func TestRunOperationFailConflict(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + om.updater.workQueues = []chan *core.OperationUpdate{ + make(chan *core.OperationUpdate, 1), + } + + ctx := context.Background() + op := &core.PreparedOperation{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + Type: core.OpTypeBlockchainPinBatch, + } + + om.RegisterHandler(ctx, &mockHandler{RunErr: &mockConflictErr{err: fmt.Errorf("pop")}}, []core.OpType{core.OpTypeBlockchainPinBatch}) + _, err := om.RunOperation(ctx, op, true) + + update := <-om.updater.workQueues[0] + assert.Equal(t, "ns1:"+op.ID.String(), update.NamespacedOpID) + assert.Equal(t, core.OpStatusPending, update.Status) assert.EqualError(t, err, "pop") } @@ -229,8 +301,11 @@ func TestRunOperationFailRemainPending(t *testing.T) { Type: core.OpTypeBlockchainPinBatch, } - om.RegisterHandler(ctx, &mockHandler{RunErr: fmt.Errorf("pop")}, []core.OpType{core.OpTypeBlockchainPinBatch}) - _, err := om.RunOperation(ctx, op, RemainPendingOnFailure) + om.RegisterHandler(ctx, &mockHandler{ + RunErr: fmt.Errorf("pop"), + Phase: core.OpPhasePending, + }, []core.OpType{core.OpTypeBlockchainPinBatch}) + _, err := om.RunOperation(ctx, op, false) assert.EqualError(t, err, "pop") } @@ -241,12 +316,14 @@ func TestRetryOperationSuccess(t *testing.T) { ctx := context.Background() opID := fftypes.NewUUID() + txID := fftypes.NewUUID() op := &core.Operation{ - ID: opID, - Namespace: "ns1", - Plugin: "blockchain", - Type: core.OpTypeBlockchainPinBatch, - Status: core.OpStatusFailed, + ID: opID, + Namespace: "ns1", + Plugin: "blockchain", + Transaction: txID, + Type: core.OpTypeBlockchainPinBatch, + Status: core.OpStatusFailed, } po := &core.PreparedOperation{ ID: op.ID, @@ -274,6 +351,11 @@ func TestRetryOperationSuccess(t *testing.T) { assert.Equal(t, op.ID.String(), val) return true })).Return(true, nil) + mdi.On("GetTransactionByID", mock.Anything, "ns1", 
txID).Return(&core.Transaction{ + ID: txID, + Namespace: "ns1", + IdempotencyKey: "idem1", + }, nil) om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []core.OpType{core.OpTypeBlockchainPinBatch}) newOp, err := om.RetryOperation(ctx, op.ID) @@ -284,6 +366,39 @@ func TestRetryOperationSuccess(t *testing.T) { mdi.AssertExpectations(t) } +func TestRetryOperationGetTXFail(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + txID := fftypes.NewUUID() + op := &core.Operation{ + ID: opID, + Namespace: "ns1", + Plugin: "blockchain", + Transaction: txID, + Type: core.OpTypeBlockchainPinBatch, + Status: core.OpStatusFailed, + } + po := &core.PreparedOperation{ + ID: op.ID, + Type: op.Type, + } + + om.cache = cache.NewUmanagedCache(ctx, 100, 10*time.Minute) + om.cacheOperation(op) + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("GetTransactionByID", mock.Anything, "ns1", txID).Return(nil, fmt.Errorf("pop")) + + om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []core.OpType{core.OpTypeBlockchainPinBatch}) + _, err := om.RetryOperation(ctx, op.ID) + + assert.Regexp(t, "pop", err) + mdi.AssertExpectations(t) +} + func TestRetryOperationGetFail(t *testing.T) { om, cancel := newTestOperations(t) defer cancel() @@ -340,6 +455,7 @@ func TestRetryTwiceOperationInsertFail(t *testing.T) { mdi := om.database.(*databasemocks.Plugin) mdi.On("GetOperationByID", ctx, "ns1", opID).Return(op, nil) mdi.On("GetOperationByID", ctx, "ns1", opID2).Return(op2, nil) + mdi.On("GetTransactionByID", mock.Anything, "ns1", mock.Anything).Return(nil, nil) mdi.On("InsertOperation", ctx, mock.Anything).Return(fmt.Errorf("pop")) om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []core.OpType{core.OpTypeBlockchainPinBatch}) @@ -369,6 +485,7 @@ func TestRetryOperationInsertFail(t *testing.T) { mdi := om.database.(*databasemocks.Plugin) mdi.On("GetOperationByID", ctx, "ns1", opID).Return(op, nil) + mdi.On("GetTransactionByID", mock.Anything, "ns1", mock.Anything).Return(nil, nil) mdi.On("InsertOperation", ctx, mock.Anything).Return(fmt.Errorf("pop")) om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []core.OpType{core.OpTypeBlockchainPinBatch}) @@ -399,6 +516,7 @@ func TestRetryOperationUpdateFail(t *testing.T) { mdi := om.database.(*databasemocks.Plugin) mdi.On("GetOperationByID", ctx, "ns1", opID).Return(op, nil) + mdi.On("GetTransactionByID", mock.Anything, "ns1", mock.Anything).Return(nil, nil) mdi.On("InsertOperation", ctx, mock.Anything).Return(nil) mdi.On("UpdateOperation", ctx, "ns1", op.ID, mock.Anything, mock.Anything).Return(false, fmt.Errorf("pop")) @@ -490,29 +608,68 @@ func TestResubmitIdempotentOperation(t *testing.T) { ID: opID, Plugin: "blockchain", Type: core.OpTypeBlockchainPinBatch, - Status: core.OpStatusFailed, + Status: core.OpStatusInitialized, + } + po := &core.PreparedOperation{ + ID: op.ID, + Type: op.Type, + } + operations = append(operations, op) + + mdi := om.database.(*databasemocks.Plugin) + fb := database.OperationQueryFactory.NewFilter(ctx) + filter := fb.And( + fb.Eq("tx", id), + ) + om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []core.OpType{core.OpTypeBlockchainPinBatch}) + mdi.On("GetOperations", ctx, "ns1", filter).Return(operations, nil, nil) + total, resubmitted, err := om.ResubmitOperations(ctx, id) + assert.NoError(t, err) + assert.Equal(t, total, 1) + assert.Len(t, resubmitted, 1) + + mdi.AssertExpectations(t) +} + +func TestResubmitIdempotentOperationSkipCached(t *testing.T) { + om, cancel 
:= newTestOperations(t) + var id = fftypes.NewUUID() + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + operations := make([]*core.Operation, 0) + op := &core.Operation{ + ID: opID, + Plugin: "blockchain", + Type: core.OpTypeBlockchainPinBatch, + Status: core.OpStatusInitialized, } po := &core.PreparedOperation{ ID: op.ID, Type: op.Type, } operations = append(operations, op) + opFlushInFlight := *op + opFlushInFlight.Status = core.OpStatusFailed + om.cache.Set(op.ID.String(), &opFlushInFlight) mdi := om.database.(*databasemocks.Plugin) fb := database.OperationQueryFactory.NewFilter(ctx) filter := fb.And( fb.Eq("tx", id), - fb.Eq("status", core.OpStatusInitialized), ) om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []core.OpType{core.OpTypeBlockchainPinBatch}) mdi.On("GetOperations", ctx, "ns1", filter).Return(operations, nil, nil) - resubOp, err := om.ResubmitOperations(ctx, id) + total, resubmitted, err := om.ResubmitOperations(ctx, id) assert.NoError(t, err) - assert.Equal(t, op, resubOp) + assert.Equal(t, total, 1) + assert.Empty(t, resubmitted) mdi.AssertExpectations(t) } -func TestResubmitIdempotentOperationError(t *testing.T) { + +func TestResubmitIdempotentOperationLookupError(t *testing.T) { om, cancel := newTestOperations(t) var id = fftypes.NewUUID() defer cancel() @@ -536,12 +693,49 @@ func TestResubmitIdempotentOperationError(t *testing.T) { fb := database.OperationQueryFactory.NewFilter(ctx) filter := fb.And( fb.Eq("tx", id), - fb.Eq("status", core.OpStatusInitialized), ) om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []core.OpType{core.OpTypeBlockchainPinBatch}) mdi.On("GetOperations", ctx, "ns1", filter).Return(operations, nil, fmt.Errorf("pop")) - _, err := om.ResubmitOperations(ctx, id) + _, _, err := om.ResubmitOperations(ctx, id) + assert.Error(t, err) + + mdi.AssertExpectations(t) +} + +func TestResubmitIdempotentOperationExecError(t *testing.T) { + om, cancel := newTestOperations(t) + var id = fftypes.NewUUID() + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + operations := make([]*core.Operation, 0) + op := &core.Operation{ + ID: opID, + Plugin: "blockchain", + Type: core.OpTypeBlockchainPinBatch, + Status: core.OpStatusInitialized, + } + po := &core.PreparedOperation{ + ID: op.ID, + Type: op.Type, + } + operations = append(operations, op) + + mdi := om.database.(*databasemocks.Plugin) + fb := database.OperationQueryFactory.NewFilter(ctx) + filter := fb.And( + fb.Eq("tx", id), + ) + om.RegisterHandler(ctx, &mockHandler{Prepared: po, RunErr: fmt.Errorf("pop")}, []core.OpType{core.OpTypeBlockchainPinBatch}) + mdi.On("GetOperations", ctx, "ns1", filter).Return(operations, nil, nil) + _, _, err := om.ResubmitOperations(ctx, id) assert.Error(t, err) mdi.AssertExpectations(t) } + +func TestErrTernaryHelper(t *testing.T) { + assert.Equal(t, core.OpPhasePending, ErrTernary(nil, core.OpPhaseInitializing, core.OpPhasePending)) + assert.Equal(t, core.OpPhaseInitializing, ErrTernary(fmt.Errorf("pop"), core.OpPhaseInitializing, core.OpPhasePending)) +} diff --git a/internal/operations/operation_updater.go b/internal/operations/operation_updater.go index 0365d8fffb..9f3b6d3a6f 100644 --- a/internal/operations/operation_updater.go +++ b/internal/operations/operation_updater.go @@ -105,6 +105,12 @@ func (ou *operationUpdater) SubmitOperationUpdate(ctx context.Context, update *c } if ou.conf.workerCount > 0 { + if update.Status == core.OpStatusFailed { + // We do a cache update pre-emptively, as for idempotency checking 
on an error status we want to + // see the update immediately - even though it's being asynchronously flushed to the storage + ou.manager.updateCachedOperation(id, update.Status, &update.ErrorMessage, update.Output, nil) + } + select { case ou.pickWorker(ctx, id, update) <- update: case <-ou.ctx.Done(): diff --git a/internal/orchestrator/bound_callbacks.go b/internal/orchestrator/bound_callbacks.go index d19838e77e..5fc3c3df79 100644 --- a/internal/orchestrator/bound_callbacks.go +++ b/internal/orchestrator/bound_callbacks.go @@ -59,25 +59,11 @@ func (bc *boundCallbacks) SharedStorageBlobDownloaded(hash fftypes.Bytes32, size return bc.o.events.SharedStorageBlobDownloaded(bc.o.sharedstorage(), hash, size, payloadRef, dataID) } -func (bc *boundCallbacks) BatchPinComplete(namespace string, batch *blockchain.BatchPin, signingKey *core.VerifierRef) error { +func (bc *boundCallbacks) BlockchainEventBatch(batch []*blockchain.EventToDispatch) error { if err := bc.checkStopped(); err != nil { return err } - return bc.o.events.BatchPinComplete(namespace, batch, signingKey) -} - -func (bc *boundCallbacks) BlockchainNetworkAction(action string, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef) error { - if err := bc.checkStopped(); err != nil { - return err - } - return bc.o.events.BlockchainNetworkAction(action, location, event, signingKey) -} - -func (bc *boundCallbacks) BlockchainEvent(event *blockchain.EventWithSubscription) error { - if err := bc.checkStopped(); err != nil { - return err - } - return bc.o.events.BlockchainEvent(event) + return bc.o.events.BlockchainEventBatch(batch) } func (bc *boundCallbacks) DXEvent(plugin dataexchange.Plugin, event dataexchange.DXEvent) error { diff --git a/internal/orchestrator/bound_callbacks_test.go b/internal/orchestrator/bound_callbacks_test.go index 5702cf99a6..06bd633831 100644 --- a/internal/orchestrator/bound_callbacks_test.go +++ b/internal/orchestrator/bound_callbacks_test.go @@ -86,16 +86,8 @@ func TestBoundCallbacks(t *testing.T) { err = bc.SharedStorageBlobDownloaded(*hash, 12345, "payload1", dataID) assert.NoError(t, err) - mei.On("BatchPinComplete", "ns1", &blockchain.BatchPin{}, &core.VerifierRef{}).Return(nil) - err = bc.BatchPinComplete("ns1", &blockchain.BatchPin{}, &core.VerifierRef{}) - assert.NoError(t, err) - - mei.On("BlockchainNetworkAction", "action", fftypes.JSONAnyPtr("{}"), &blockchain.Event{}, &core.VerifierRef{}).Return(nil) - err = bc.BlockchainNetworkAction("action", fftypes.JSONAnyPtr("{}"), &blockchain.Event{}, &core.VerifierRef{}) - assert.NoError(t, err) - - mei.On("BlockchainEvent", &blockchain.EventWithSubscription{}).Return(nil) - err = bc.BlockchainEvent(&blockchain.EventWithSubscription{}) + mei.On("BlockchainEventBatch", []*blockchain.EventToDispatch{{Type: blockchain.EventTypeBatchPinComplete}}).Return(nil) + err = bc.BlockchainEventBatch([]*blockchain.EventToDispatch{{Type: blockchain.EventTypeBatchPinComplete}}) assert.NoError(t, err) mei.On("DXEvent", mdx, &dataexchangemocks.DXEvent{}).Return(nil) @@ -130,13 +122,7 @@ func TestBoundCallbacksStopped(t *testing.T) { err = bc.SharedStorageBlobDownloaded(*fftypes.NewRandB32(), 12345, "payload1", nil) assert.Regexp(t, "FF10446", err) - err = bc.BatchPinComplete("ns1", &blockchain.BatchPin{}, &core.VerifierRef{}) - assert.Regexp(t, "FF10446", err) - - err = bc.BlockchainNetworkAction("action", fftypes.JSONAnyPtr("{}"), &blockchain.Event{}, &core.VerifierRef{}) - assert.Regexp(t, "FF10446", err) - - err = 
bc.BlockchainEvent(&blockchain.EventWithSubscription{}) + err = bc.BlockchainEventBatch([]*blockchain.EventToDispatch{}) assert.Regexp(t, "FF10446", err) err = bc.DXEvent(nil, &dataexchangemocks.DXEvent{}) diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go index 24d32b1acc..186782d633 100644 --- a/internal/orchestrator/orchestrator.go +++ b/internal/orchestrator/orchestrator.go @@ -43,6 +43,7 @@ import ( "github.com/hyperledger/firefly/internal/shareddownload" "github.com/hyperledger/firefly/internal/syncasync" "github.com/hyperledger/firefly/internal/txcommon" + "github.com/hyperledger/firefly/internal/txwriter" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" @@ -213,6 +214,7 @@ type orchestrator struct { cacheManager cache.Manager operations operations.Manager txHelper txcommon.Helper + txWriter txwriter.Writer } func NewOrchestrator(ns *core.Namespace, config Config, plugins *Plugins, metrics metrics.Manager, cacheManager cache.Manager) Orchestrator { @@ -244,6 +246,11 @@ func (or *orchestrator) Init() (err error) { return err } +func Purge(ctx context.Context, ns *core.Namespace, plugins *Plugins, dxNodeName string) { + // Clear all handlers on all plugins, as this namespace is never coming back + setHandlers(ctx, plugins, ns, dxNodeName, nil, nil) +} + func (or *orchestrator) database() database.Plugin { return or.plugins.Database.Plugin } @@ -285,6 +292,9 @@ func (or *orchestrator) Start() (err error) { if err == nil { err = or.operations.Start() } + if err == nil { + or.txWriter.Start() + } or.started = true return err @@ -318,6 +328,9 @@ func (or *orchestrator) WaitStop() { or.operations.WaitStop() or.operations = nil } + if or.txWriter != nil { + or.txWriter.Close() + } or.startedLock.Lock() defer or.startedLock.Unlock() or.started = false @@ -378,25 +391,36 @@ func (or *orchestrator) Identity() identity.Manager { } func (or *orchestrator) initHandlers(ctx context.Context) { - or.plugins.Database.Plugin.SetHandler(or.namespace.Name, or) + // Update all the handlers to point to this instance of the orchestrator + setHandlers(ctx, or.plugins, or.namespace, or.config.Multiparty.Node.Name, or, &or.bc) +} + +func setHandlers(ctx context.Context, + plugins *Plugins, + namespace *core.Namespace, + dxNodeName string, + dbc database.Callbacks, + bc *boundCallbacks, +) { + plugins.Database.Plugin.SetHandler(namespace.Name, dbc) - if or.plugins.Blockchain.Plugin != nil { - or.plugins.Blockchain.Plugin.SetHandler(or.namespace.Name, &or.bc) - or.plugins.Blockchain.Plugin.SetOperationHandler(or.namespace.Name, &or.bc) + if plugins.Blockchain.Plugin != nil { + plugins.Blockchain.Plugin.SetHandler(namespace.Name, bc) + plugins.Blockchain.Plugin.SetOperationHandler(namespace.Name, bc) } - if or.plugins.SharedStorage.Plugin != nil { - or.plugins.SharedStorage.Plugin.SetHandler(or.namespace.Name, &or.bc) + if plugins.SharedStorage.Plugin != nil { + plugins.SharedStorage.Plugin.SetHandler(namespace.Name, bc) } - if or.plugins.DataExchange.Plugin != nil { - or.plugins.DataExchange.Plugin.SetHandler(or.namespace.NetworkName, or.config.Multiparty.Node.Name, &or.bc) - or.plugins.DataExchange.Plugin.SetOperationHandler(or.namespace.Name, &or.bc) + if plugins.DataExchange.Plugin != nil { + plugins.DataExchange.Plugin.SetHandler(namespace.NetworkName, dxNodeName, bc) + plugins.DataExchange.Plugin.SetOperationHandler(namespace.Name, bc) } - for _, token := range or.plugins.Tokens { 
- token.Plugin.SetHandler(or.namespace.Name, &or.bc) - token.Plugin.SetOperationHandler(or.namespace.Name, &or.bc) + for _, token := range plugins.Tokens { + token.Plugin.SetHandler(namespace.Name, bc) + token.Plugin.SetOperationHandler(namespace.Name, bc) } } @@ -448,6 +472,10 @@ func (or *orchestrator) initManagers(ctx context.Context) (err error) { } } + if or.txWriter == nil { + or.txWriter = txwriter.NewTransactionWriter(ctx, or.namespace.Name, or.database(), or.txHelper, or.operations) + } + if or.config.Multiparty.Enabled { if or.multiparty == nil { or.multiparty, err = multiparty.NewMultipartyManager(or.ctx, or.namespace, or.config.Multiparty, or.database(), or.blockchain(), or.operations, or.metrics, or.txHelper) @@ -492,7 +520,7 @@ func (or *orchestrator) initManagers(ctx context.Context) (err error) { if or.blockchain() != nil { if or.contracts == nil { - or.contracts, err = contracts.NewContractManager(ctx, or.namespace.Name, or.database(), or.blockchain(), or.data, or.broadcast, or.messaging, or.batch, or.identity, or.operations, or.txHelper, or.syncasync) + or.contracts, err = contracts.NewContractManager(ctx, or.namespace.Name, or.database(), or.blockchain(), or.data, or.broadcast, or.messaging, or.batch, or.identity, or.operations, or.txHelper, or.txWriter, or.syncasync, or.cacheManager) if err != nil { return err } @@ -500,7 +528,7 @@ func (or *orchestrator) initManagers(ctx context.Context) (err error) { } if or.assets == nil { - or.assets, err = assets.NewAssetManager(ctx, or.namespace.Name, or.config.KeyNormalization, or.database(), or.tokens(), or.identity, or.syncasync, or.broadcast, or.messaging, or.metrics, or.operations, or.contracts, or.txHelper) + or.assets, err = assets.NewAssetManager(ctx, or.namespace.Name, or.config.KeyNormalization, or.database(), or.tokens(), or.identity, or.syncasync, or.broadcast, or.messaging, or.metrics, or.operations, or.contracts, or.txHelper, or.cacheManager) if err != nil { return err } @@ -555,7 +583,7 @@ func (or *orchestrator) SubmitNetworkAction(ctx context.Context, action *core.Ne if err != nil { return err } - return or.multiparty.SubmitNetworkAction(ctx, key, action) + return or.multiparty.SubmitNetworkAction(ctx, key, action, false /* network actions do not support idempotency keys currently */) } func (or *orchestrator) Authorize(ctx context.Context, authReq *fftypes.AuthReq) error { diff --git a/internal/orchestrator/orchestrator_test.go b/internal/orchestrator/orchestrator_test.go index 831a27ec82..a3a3d31ca7 100644 --- a/internal/orchestrator/orchestrator_test.go +++ b/internal/orchestrator/orchestrator_test.go @@ -51,7 +51,10 @@ import ( "github.com/hyperledger/firefly/mocks/spieventsmocks" "github.com/hyperledger/firefly/mocks/tokenmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" + "github.com/hyperledger/firefly/mocks/txwritermocks" "github.com/hyperledger/firefly/pkg/core" + "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/events" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) @@ -85,6 +88,7 @@ type testOrchestrator struct { mdh *definitionsmocks.Handler mmp *multipartymocks.Manager mds *definitionsmocks.Sender + mtw *txwritermocks.Writer } func (tor *testOrchestrator) cleanup(t *testing.T) { @@ -146,6 +150,7 @@ func newTestOrchestrator() *testOrchestrator { mdh: &definitionsmocks.Handler{}, mmp: &multipartymocks.Manager{}, mds: &definitionsmocks.Sender{}, + mtw: &txwritermocks.Writer{}, } tor.orchestrator.multiparty = tor.mmp 
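The new Purge function in orchestrator.go above tears a namespace's callbacks down by re-running the shared setHandlers wiring with nil handlers. A hedged sketch of a caller (the namespaceRemoved hook and its arguments are assumptions for illustration; only orchestrator.Purge itself is in this diff, and the code assumes it lives inside the FireFly module):

package example // illustrative sketch only

import (
	"context"

	"github.com/hyperledger/firefly/internal/orchestrator"
	"github.com/hyperledger/firefly/pkg/core"
)

// namespaceRemoved is a hypothetical hook for when a namespace is deleted from the config.
// Purge re-points every plugin handler for the namespace at nil, so no further blockchain,
// data exchange, shared storage or token callbacks are routed to the removed namespace.
func namespaceRemoved(ctx context.Context, ns *core.Namespace, plugins *orchestrator.Plugins, dxNodeName string) {
	orchestrator.Purge(ctx, ns, plugins, dxNodeName)
}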
tor.orchestrator.data = tor.mdm @@ -162,6 +167,7 @@ func newTestOrchestrator() *testOrchestrator { tor.orchestrator.operations = tor.mom tor.orchestrator.sharedDownload = tor.msd tor.orchestrator.txHelper = tor.mth + tor.orchestrator.txWriter = tor.mtw tor.orchestrator.defhandler = tor.mdh tor.orchestrator.defsender = tor.mds tor.orchestrator.config.Multiparty.Enabled = true @@ -194,6 +200,7 @@ func newTestOrchestrator() *testOrchestrator { tor.mcm.On("Name").Return("mock-cm").Maybe() tor.mmi.On("Name").Return("mock-mm").Maybe() tor.mmp.On("Name").Return("mock-mp").Maybe() + tor.mem.On("ResolveTransportAndCapabilities", mock.Anything, mock.Anything).Return("websockets", &events.Capabilities{}, nil).Maybe() tor.mds.On("Init", mock.Anything).Maybe() tor.cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(tor.ctx, 100, 5*time.Minute), nil).Maybe() return tor @@ -431,6 +438,16 @@ func TestStartBatchFail(t *testing.T) { assert.EqualError(t, err, "pop") } +func TestInitTXWriter(t *testing.T) { + or := newTestOrchestrator() + defer or.cleanup(t) + or.txWriter = nil + or.config.Multiparty.Enabled = false + or.mdi.On("Capabilities").Return(&database.Capabilities{Concurrency: false}) + err := or.initManagers(context.Background()) + assert.NoError(t, err) +} + func TestStartStopOk(t *testing.T) { coreconfig.Reset() or := newTestOrchestrator() @@ -441,24 +458,42 @@ func TestStartStopOk(t *testing.T) { or.mbm.On("Start").Return(nil) or.msd.On("Start").Return(nil) or.mom.On("Start").Return(nil) + or.mtw.On("Start").Return() or.mba.On("WaitStop").Return(nil) or.mbm.On("WaitStop").Return(nil) or.mdm.On("WaitStop").Return(nil) or.msd.On("WaitStop").Return(nil) or.mom.On("WaitStop").Return(nil) or.mem.On("WaitStop").Return(nil) + or.mtw.On("Close").Return(nil) err := or.Start() assert.NoError(t, err) or.WaitStop() or.WaitStop() // swallows dups } +func TestPurge(t *testing.T) { + coreconfig.Reset() + or := newTestOrchestrator() + defer or.cleanup(t) + // Note additional testing of this happens in namespace manager + or.mdi.On("SetHandler", mock.Anything, mock.Anything).Return(nil) + or.mbi.On("SetHandler", mock.Anything, mock.Anything).Return(nil) + or.mbi.On("SetOperationHandler", mock.Anything, mock.Anything).Return(nil) + or.mps.On("SetHandler", mock.Anything, mock.Anything).Return(nil) + or.mdx.On("SetHandler", mock.Anything, "Test1", mock.Anything).Return(nil) + or.mdx.On("SetOperationHandler", mock.Anything, mock.Anything).Return(nil) + or.mti.On("SetHandler", mock.Anything, mock.Anything).Return(nil) + or.mti.On("SetOperationHandler", mock.Anything, mock.Anything).Return(nil) + Purge(context.Background(), or.namespace, or.plugins, "Test1") +} + func TestNetworkAction(t *testing.T) { or := newTestOrchestrator() or.namespace.Name = core.LegacySystemNamespace action := &core.NetworkAction{Type: core.NetworkActionTerminate} or.mim.On("ResolveInputSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x123", nil) - or.mmp.On("SubmitNetworkAction", context.Background(), "0x123", action).Return(nil) + or.mmp.On("SubmitNetworkAction", context.Background(), "0x123", action, false).Return(nil) err := or.SubmitNetworkAction(context.Background(), action) assert.NoError(t, err) } diff --git a/internal/orchestrator/status.go b/internal/orchestrator/status.go index 9f4d4a9653..d4c8384ec4 100644 --- a/internal/orchestrator/status.go +++ b/internal/orchestrator/status.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
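The orchestrator now also owns a per-namespace transaction writer: initManagers constructs it, Start begins it alongside the other managers, and WaitStop closes it. A condensed sketch of that lifecycle (the wrapper function is illustrative; the constructor arguments match the initManagers call in this diff, and the code assumes it lives inside the FireFly module):

package example // illustrative sketch only

import (
	"context"

	"github.com/hyperledger/firefly/internal/operations"
	"github.com/hyperledger/firefly/internal/txcommon"
	"github.com/hyperledger/firefly/internal/txwriter"
	"github.com/hyperledger/firefly/pkg/database"
)

// runTxWriter shows the lifecycle the orchestrator applies to the new writer:
// construct once per namespace, Start with the orchestrator, Close on WaitStop.
func runTxWriter(ctx context.Context, nsName string, di database.Plugin, txHelper txcommon.Helper, ops operations.Manager) txwriter.Writer {
	w := txwriter.NewTransactionWriter(ctx, nsName, di, txHelper, ops) // as in initManagers
	w.Start()                                                          // as in orchestrator Start()
	// ... later, orchestrator WaitStop() calls Close() to stop the writer
	return w
}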
// // SPDX-License-Identifier: Apache-2.0 // @@ -92,7 +92,7 @@ func (or *orchestrator) GetStatus(ctx context.Context) (status *core.NamespaceSt status.Org = &core.NamespaceStatusOrg{Name: or.config.Multiparty.Org.Name} status.Multiparty.Contracts = or.namespace.Contracts - org, err := or.identity.GetMultipartyRootOrg(ctx) + org, err := or.identity.GetRootOrg(ctx) if err != nil { log.L(ctx).Warnf("Failed to query local org for status: %s", err) } diff --git a/internal/orchestrator/status_test.go b/internal/orchestrator/status_test.go index fdff280845..1f4ca638c4 100644 --- a/internal/orchestrator/status_test.go +++ b/internal/orchestrator/status_test.go @@ -85,7 +85,7 @@ func TestGetStatusRegistered(t *testing.T) { orgID := fftypes.NewUUID() nodeID := fftypes.NewUUID() - or.mim.On("GetMultipartyRootOrg", or.ctx).Return(&core.Identity{ + or.mim.On("GetRootOrg", or.ctx).Return(&core.Identity{ IdentityBase: core.IdentityBase{ ID: orgID, Name: "org1", @@ -146,7 +146,7 @@ func TestGetStatusVerifierLookupFail(t *testing.T) { orgID := fftypes.NewUUID() - or.mim.On("GetMultipartyRootOrg", or.ctx).Return(&core.Identity{ + or.mim.On("GetRootOrg", or.ctx).Return(&core.Identity{ IdentityBase: core.IdentityBase{ ID: orgID, Name: "org1", @@ -173,7 +173,7 @@ func TestGetStatusWrongNodeOwner(t *testing.T) { orgID := fftypes.NewUUID() nodeID := fftypes.NewUUID() - or.mim.On("GetMultipartyRootOrg", or.ctx).Return(&core.Identity{ + or.mim.On("GetRootOrg", or.ctx).Return(&core.Identity{ IdentityBase: core.IdentityBase{ ID: orgID, Name: "org1", @@ -223,7 +223,7 @@ func TestGetStatusUnregistered(t *testing.T) { coreconfig.Reset() config.Set(coreconfig.NamespacesDefault, "default") - or.mim.On("GetMultipartyRootOrg", or.ctx).Return(nil, fmt.Errorf("pop")) + or.mim.On("GetRootOrg", or.ctx).Return(nil, fmt.Errorf("pop")) or.config.Multiparty.Org.Name = "org1" or.config.Multiparty.Node.Name = "node1" @@ -252,7 +252,7 @@ func TestGetStatusOrgOnlyRegistered(t *testing.T) { orgID := fftypes.NewUUID() - or.mim.On("GetMultipartyRootOrg", or.ctx).Return(&core.Identity{ + or.mim.On("GetRootOrg", or.ctx).Return(&core.Identity{ IdentityBase: core.IdentityBase{ ID: orgID, Name: "org1", @@ -304,7 +304,7 @@ func TestGetStatusNodeError(t *testing.T) { orgID := fftypes.NewUUID() - or.mim.On("GetMultipartyRootOrg", or.ctx).Return(&core.Identity{ + or.mim.On("GetRootOrg", or.ctx).Return(&core.Identity{ IdentityBase: core.IdentityBase{ ID: orgID, Name: "org1", diff --git a/internal/orchestrator/subscriptions.go b/internal/orchestrator/subscriptions.go index 63ac480f96..fcfd934124 100644 --- a/internal/orchestrator/subscriptions.go +++ b/internal/orchestrator/subscriptions.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -18,6 +18,7 @@ package orchestrator import ( "context" + "time" "github.com/hyperledger/firefly-common/pkg/ffapi" "github.com/hyperledger/firefly-common/pkg/fftypes" @@ -46,6 +47,36 @@ func (or *orchestrator) createUpdateSubscription(ctx context.Context, subDef *co if subDef.Transport == system.SystemEventsTransport { return nil, i18n.NewError(ctx, coremsgs.MsgSystemTransportInternal) } + resolvedTransport, capabilities, err := or.events.ResolveTransportAndCapabilities(ctx, subDef.Transport) + if err != nil { + return nil, err + } + subDef.Transport = resolvedTransport + + if subDef.Options.TLSConfigName != "" { + if or.namespace.TLSConfigs[subDef.Options.TLSConfigName] == nil { + return nil, i18n.NewError(ctx, coremsgs.MsgNotFoundTLSConfig, subDef.Options.TLSConfigName, subDef.Namespace) + + } + + subDef.Options.TLSConfig = or.namespace.TLSConfigs[subDef.Options.TLSConfigName] + } + + if subDef.Options.BatchTimeout != nil && *subDef.Options.BatchTimeout != "" { + _, err := fftypes.ParseDurationString(*subDef.Options.BatchTimeout, time.Millisecond) + if err != nil { + return nil, err + } + } + + if subDef.Options.Batch != nil && *subDef.Options.Batch { + if subDef.Options.WithData != nil && *subDef.Options.WithData { + return nil, i18n.NewError(ctx, coremsgs.MsgBatchWithDataNotSupported, subDef.Name) + } + if !capabilities.BatchDelivery { + return nil, i18n.NewError(ctx, coremsgs.MsgBatchDeliveryNotSupported, subDef.Transport) + } + } return subDef, or.events.CreateUpdateDurableSubscription(ctx, subDef, mustNew) } diff --git a/internal/orchestrator/subscriptions_test.go b/internal/orchestrator/subscriptions_test.go index f10e418299..58326469f5 100644 --- a/internal/orchestrator/subscriptions_test.go +++ b/internal/orchestrator/subscriptions_test.go @@ -18,13 +18,17 @@ package orchestrator import ( "context" + "crypto/tls" "fmt" "testing" "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly/internal/events/system" + "github.com/hyperledger/firefly/internal/events/webhooks" + "github.com/hyperledger/firefly/mocks/eventmocks" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/events" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) @@ -54,6 +58,89 @@ func TestCreateSubscriptionSystemTransport(t *testing.T) { assert.Regexp(t, "FF10266", err) } +func TestCreateSubscriptionBadBatchTimeout(t *testing.T) { + or := newTestOrchestrator() + defer or.cleanup(t) + + badTimeout := "-abc" + _, err := or.CreateSubscription(or.ctx, &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Name: "sub1", + }, + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + BatchTimeout: &badTimeout, + }, + WebhookSubOptions: core.WebhookSubOptions{ + URL: "http://example.com", + }, + }, + Transport: "webhooks", + }) + assert.Regexp(t, "FF00137", err) +} + +func TestCreateSubscriptionBatchNotSupported(t *testing.T) { + or := newTestOrchestrator() + defer or.cleanup(t) + + truthy := true + _, err := or.CreateSubscription(or.ctx, &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Name: "sub1", + }, + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + Batch: &truthy, + }, + WebhookSubOptions: core.WebhookSubOptions{ + URL: "http://example.com", + }, + }, + Transport: "webhooks", + }) + assert.Regexp(t, "FF10461", err) +} + +func 
TestCreateSubscriptionBatchWithData(t *testing.T) { + or := newTestOrchestrator() + defer or.cleanup(t) + + truthy := true + _, err := or.CreateSubscription(or.ctx, &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Name: "sub1", + }, + Options: core.SubscriptionOptions{ + SubscriptionCoreOptions: core.SubscriptionCoreOptions{ + WithData: &truthy, + Batch: &truthy, + }, + WebhookSubOptions: core.WebhookSubOptions{ + URL: "http://example.com", + }, + }, + Transport: "webhooks", + }) + assert.Regexp(t, "FF10460", err) +} + +func TestCreateSubscriptionBadTransport(t *testing.T) { + or := newTestOrchestrator() + defer or.cleanup(t) + + or.mem = &eventmocks.EventManager{} + or.mem.On("ResolveTransportAndCapabilities", mock.Anything, "wrongun").Return("", nil, fmt.Errorf("not found")) + or.events = or.mem + _, err := or.CreateSubscription(or.ctx, &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Name: "sub1", + }, + Transport: "wrongun", + }) + assert.Regexp(t, "not found", err) +} + func TestCreateSubscriptionOk(t *testing.T) { or := newTestOrchestrator() defer or.cleanup(t) @@ -70,6 +157,60 @@ func TestCreateSubscriptionOk(t *testing.T) { assert.Equal(t, "ns", sub.Namespace) } +func TestCreateSubscriptionTLSConfigOk(t *testing.T) { + or := newTestOrchestrator() + defer or.cleanup(t) + + mockTlSConfig := &tls.Config{} + + or.namespace.TLSConfigs = map[string]*tls.Config{ + "myconfig": mockTlSConfig, + } + + sub := &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Name: "sub1", + }, + Options: core.SubscriptionOptions{ + WebhookSubOptions: core.WebhookSubOptions{ + TLSConfigName: "myconfig", + }, + }, + Transport: "webhooks", + } + + or.mem.On("CreateUpdateDurableSubscription", mock.Anything, mock.Anything, true).Return(nil) + s1, err := or.CreateSubscription(or.ctx, sub) + assert.NoError(t, err) + assert.Equal(t, s1, sub) + assert.Equal(t, "ns", sub.Namespace) + assert.Equal(t, mockTlSConfig, s1.Options.TLSConfig) +} + +func TestCreateSubscriptionTLSConfigNotFound(t *testing.T) { + or := newTestOrchestrator() + defer or.cleanup(t) + + or.plugins.Events = map[string]events.Plugin{ + "webhooks": &webhooks.WebHooks{}, + } + + sub := &core.Subscription{ + SubscriptionRef: core.SubscriptionRef{ + Name: "sub1", + }, + Options: core.SubscriptionOptions{ + WebhookSubOptions: core.WebhookSubOptions{ + TLSConfigName: "myconfig", + }, + }, + Transport: "webhooks", + } + _, err := or.CreateSubscription(or.ctx, sub) + assert.Error(t, err) + assert.Regexp(t, "FF10455", err) +} + func TestCreateUpdateSubscriptionOk(t *testing.T) { or := newTestOrchestrator() defer or.cleanup(t) diff --git a/internal/orchestrator/txn_status.go b/internal/orchestrator/txn_status.go index 2d0a30f3e7..7741616bfe 100644 --- a/internal/orchestrator/txn_status.go +++ b/internal/orchestrator/txn_status.go @@ -81,6 +81,15 @@ func (or *orchestrator) GetTransactionStatus(ctx context.Context, id string) (*c } for _, op := range ops { result.Details = append(result.Details, txOperationStatus(op)) + if op.Status == core.OpStatusPending { + // Check to see if there's an update + // Operations can stay in "Pending" if FireFly was down when a TX receipt became available + opWithDetail, err := or.GetOperationByIDWithStatus(ctx, op.ID.String()) + if err != nil { + return nil, err + } + op = &opWithDetail.Operation + } if op.Retry == nil { updateStatus(result, op.Status) } @@ -126,7 +135,7 @@ func (or *orchestrator) GetTransactionStatus(ctx context.Context, id string) (*c case len(pools) == 0: result.Details 
= append(result.Details, pendingPlaceholder(core.TransactionStatusTypeTokenPool)) updateStatus(result, core.OpStatusPending) - case pools[0].State != core.TokenPoolStateConfirmed: + case !pools[0].Active: result.Details = append(result.Details, &core.TransactionStatusDetails{ Status: core.OpStatusPending, Type: core.TransactionStatusTypeTokenPool, diff --git a/internal/orchestrator/txn_status_test.go b/internal/orchestrator/txn_status_test.go index b01e31fcd9..3d14dc320a 100644 --- a/internal/orchestrator/txn_status_test.go +++ b/internal/orchestrator/txn_status_test.go @@ -258,7 +258,7 @@ func TestGetTransactionStatusTokenPoolSuccess(t *testing.T) { ID: fftypes.NewUUID(), Type: core.TokenTypeFungible, Created: fftypes.UnixTime(0), - State: core.TokenPoolStateConfirmed, + Active: true, }, } @@ -378,7 +378,7 @@ func TestGetTransactionStatusTokenPoolUnconfirmed(t *testing.T) { ID: fftypes.NewUUID(), Type: core.TokenTypeFungible, Created: fftypes.UnixTime(0), - State: core.TokenPoolStatePending, + Active: false, }, } @@ -650,6 +650,9 @@ func TestGetTransactionStatusTokenTransferRetry(t *testing.T) { or.mth.On("GetTransactionByIDCached", mock.Anything, txID).Return(tx, nil) or.mdi.On("GetOperations", mock.Anything, "ns", mock.Anything).Return(ops, nil, nil) + or.mom.On("GetOperationByIDCached", mock.Anything, op1ID).Return(ops[0], nil) + or.mom.On("GetOperationByIDCached", mock.Anything, op2ID).Return(ops[1], nil) + or.mbi.On("GetTransactionStatus", mock.Anything, mock.Anything).Return(nil, nil) or.mdi.On("GetBlockchainEvents", mock.Anything, "ns", mock.Anything).Return(events, nil, nil) or.mdi.On("GetTokenTransfers", mock.Anything, "ns", mock.Anything).Return(transfers, nil, nil) @@ -940,3 +943,29 @@ func TestGetTransactionStatusUnknownType(t *testing.T) { or.mdi.AssertExpectations(t) } + +func TestGetTransactionStatusOpStatusError(t *testing.T) { + or := newTestOrchestrator() + + txID := fftypes.NewUUID() + tx := &core.Transaction{ + Namespace: "ns1", + Type: core.TransactionTypeTokenTransfer, + } + op1ID := fftypes.NewUUID() + ops := []*core.Operation{ + { + Namespace: "ns1", + Status: core.OpStatusPending, + ID: op1ID, + Type: core.OpTypeTokenTransfer, + }, + } + + or.mth.On("GetTransactionByIDCached", mock.Anything, txID).Return(tx, nil) + or.mdi.On("GetOperations", mock.Anything, "ns", mock.Anything).Return(ops, nil, nil) + or.mom.On("GetOperationByIDCached", mock.Anything, op1ID).Return(nil, fmt.Errorf("pop")) + + _, err := or.GetTransactionStatus(context.Background(), txID.String()) + assert.EqualError(t, err, "pop") +} diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index af4e30727d..be2084fd02 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -81,7 +81,7 @@ func TestSendConfirmMessageE2EOk(t *testing.T) { intermediateOrg.Parent = rootOrg.ID localNode := newTestNode("node1", intermediateOrg) mim.On("ResolveInputSigningIdentity", pm.ctx, mock.Anything).Return(nil) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(intermediateOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(intermediateOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(localNode, nil) mim.On("CachedIdentityLookupMustExist", pm.ctx, "org1").Return(intermediateOrg, false, nil) mim.On("CachedIdentityLookupByID", pm.ctx, rootOrg.ID).Return(rootOrg, nil) @@ -224,7 +224,7 @@ func TestResolveAndSendBadInlineData(t *testing.T) { localOrg := newTestOrg("localorg") localNode := newTestNode("node1", localOrg) 
mim.On("ResolveInputSigningIdentity", pm.ctx, mock.Anything).Return(nil) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(localNode, nil) mim.On("ResolveInputSigningIdentity", pm.ctx, mock.Anything).Run(func(args mock.Arguments) { identity := args[2].(*core.SignerRef) @@ -341,7 +341,7 @@ func TestMessagePrepare(t *testing.T) { localOrg := newTestOrg("localorg") localNode := newTestNode("node1", localOrg) mim.On("ResolveInputSigningIdentity", pm.ctx, mock.Anything).Return(nil) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(localNode, nil) mim.On("ResolveInputSigningIdentity", pm.ctx, mock.Anything).Run(func(args mock.Arguments) { identity := args[1].(*core.SignerRef) @@ -663,7 +663,7 @@ func TestDispatchedUnpinnedMessageOK(t *testing.T) { mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(batchSendData) return op.Type == core.OpTypeDataExchangeSendBatch && *data.Node.ID == *node2.ID - })).Return(nil, nil) + }), false).Return(nil, nil) err := pm.dispatchUnpinnedBatch(pm.ctx, &batch.DispatchPayload{ Batch: core.BatchPersisted{ @@ -709,7 +709,7 @@ func TestSendDataTransferBlobsFail(t *testing.T) { assert.Equal(t, "localorg", identity.Author) return true })).Return(nil) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(node1, nil) mdi := pm.database.(*databasemocks.Plugin) @@ -767,7 +767,7 @@ func TestSendDataTransferFail(t *testing.T) { mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(batchSendData) return op.Type == core.OpTypeDataExchangeSendBatch && *data.Node.ID == *node2.ID - })).Return(nil, fmt.Errorf("pop")) + }), false).Return(nil, fmt.Errorf("pop")) err := pm.sendData(pm.ctx, &core.TransportWrapper{ Batch: &core.Batch{ @@ -814,7 +814,7 @@ func TestSendDataTransferInsertOperationFail(t *testing.T) { assert.Equal(t, "localorg", identity.Author) return true })).Return(nil) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(node1, nil) mom := pm.operations.(*operationmocks.Manager) diff --git a/internal/privatemessaging/operations.go b/internal/privatemessaging/operations.go index 7c5cf2174a..5a6ef8c0f4 100644 --- a/internal/privatemessaging/operations.go +++ b/internal/privatemessaging/operations.go @@ -137,29 +137,29 @@ func (pm *privateMessaging) PrepareOperation(ctx context.Context, op *core.Opera } } -func (pm *privateMessaging) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { +func (pm *privateMessaging) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { switch data := op.Data.(type) { case transferBlobData: localNode, err := pm.identity.GetLocalNode(ctx) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } - return nil, false, pm.exchange.TransferBlob(ctx, op.NamespacedIDString(), data.Node.Profile, localNode.Profile, data.Blob.PayloadRef) + return nil, core.OpPhaseInitializing, pm.exchange.TransferBlob(ctx, op.NamespacedIDString(), data.Node.Profile, localNode.Profile, data.Blob.PayloadRef) 
case batchSendData: localNode, err := pm.identity.GetLocalNode(ctx) if err != nil { - return nil, false, err + return nil, core.OpPhaseInitializing, err } payload, err := json.Marshal(data.Transport) if err != nil { - return nil, false, i18n.WrapError(ctx, err, coremsgs.MsgSerializationFailed) + return nil, core.OpPhaseInitializing, i18n.WrapError(ctx, err, coremsgs.MsgSerializationFailed) } - return nil, false, pm.exchange.SendMessage(ctx, op.NamespacedIDString(), data.Node.Profile, localNode.Profile, payload) + return nil, core.OpPhaseInitializing, pm.exchange.SendMessage(ctx, op.NamespacedIDString(), data.Node.Profile, localNode.Profile, payload) default: - return nil, false, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) + return nil, core.OpPhaseInitializing, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) } } diff --git a/internal/privatemessaging/operations_test.go b/internal/privatemessaging/operations_test.go index a506f37ea4..8b1ad8df84 100644 --- a/internal/privatemessaging/operations_test.go +++ b/internal/privatemessaging/operations_test.go @@ -6,7 +6,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -81,9 +81,9 @@ func TestPrepareAndRunTransferBlob(t *testing.T) { assert.Equal(t, node, po.Data.(transferBlobData).Node) assert.Equal(t, blob, po.Data.(transferBlobData).Blob) - _, complete, err := pm.RunOperation(context.Background(), po) + _, phase, err := pm.RunOperation(context.Background(), po) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -150,9 +150,9 @@ func TestPrepareAndRunBatchSend(t *testing.T) { assert.Equal(t, group, po.Data.(batchSendData).Transport.Group) assert.Equal(t, batch, po.Data.(batchSendData).Transport.Batch) - _, complete, err := pm.RunOperation(context.Background(), po) + _, phase, err := pm.RunOperation(context.Background(), po) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -553,9 +553,9 @@ func TestRunOperationNotSupported(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) defer cancel() - _, complete, err := pm.RunOperation(context.Background(), &core.PreparedOperation{}) + _, phase, err := pm.RunOperation(context.Background(), &core.PreparedOperation{}) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10378", err) } @@ -592,9 +592,9 @@ func TestRunOperationBatchSendInvalidData(t *testing.T) { }, } - _, complete, err := pm.RunOperation(context.Background(), opSendBatch(op, node, transport)) + _, phase, err := pm.RunOperation(context.Background(), opSendBatch(op, node, transport)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10137", err) } @@ -621,9 +621,9 @@ func TestRunOperationBatchSendNodeFail(t *testing.T) { }, } - _, complete, err := pm.RunOperation(context.Background(), opSendBatch(op, node, transport)) + _, phase, err := pm.RunOperation(context.Background(), opSendBatch(op, node, transport)) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.EqualError(t, err, "pop") } @@ -640,9 +640,9 @@ func 
TestRunOperationBlobSendNodeFail(t *testing.T) { mim := pm.identity.(*identitymanagermocks.Manager) mim.On("GetLocalNode", context.Background()).Return(nil, fmt.Errorf("pop")) - _, complete, err := pm.RunOperation(context.Background(), opSendBlob(op, node, &core.Blob{})) + _, phase, err := pm.RunOperation(context.Background(), opSendBlob(op, node, &core.Blob{})) - assert.False(t, complete) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.EqualError(t, err, "pop") } diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index c41e4ce4d4..fef50c04d9 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -54,7 +54,7 @@ type Manager interface { // From operations.OperationHandler PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) - RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) + RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) } type privateMessaging struct { @@ -179,7 +179,7 @@ func (pm *privateMessaging) dispatchPinnedBatch(ctx context.Context, payload *ba } log.L(ctx).Infof("Pinning private batch %s with author=%s key=%s group=%s", payload.Batch.ID, payload.Batch.Author, payload.Batch.Key, payload.Batch.Group) - return pm.multiparty.SubmitBatchPin(ctx, &payload.Batch, payload.Pins, "" /* no payloadRef for private */) + return pm.multiparty.SubmitBatchPin(ctx, &payload.Batch, payload.Pins, "" /* no payloadRef for private */, false /* batch processing does not currently use idempotency keys */) } func (pm *privateMessaging) dispatchUnpinnedBatch(ctx context.Context, payload *batch.DispatchPayload) error { @@ -263,7 +263,7 @@ func (pm *privateMessaging) submitBlobTransfersToDX(ctx context.Context, tracker go func(tracker *blobTransferTracker) { defer wg.Done() log.L(ctx).Debugf("Initiating DX transfer blob=%s data=%s operation=%s", tracker.blobHash, tracker.dataID, tracker.op.ID) - if _, err := pm.operations.RunOperation(ctx, tracker.op); err != nil { + if _, err := pm.operations.RunOperation(ctx, tracker.op, false /* batch processing does not currently use idempotency keys */); err != nil { log.L(ctx).Errorf("Failed to initiate DX transfer blob=%s data=%s operation=%s", tracker.blobHash, tracker.dataID, tracker.op.ID) if firstError == nil { firstError = err @@ -329,7 +329,7 @@ func (pm *privateMessaging) sendData(ctx context.Context, tw *core.TransportWrap } // Then initiate the batch transfer - if _, err = pm.operations.RunOperation(ctx, sendBatchOp); err != nil { + if _, err = pm.operations.RunOperation(ctx, sendBatchOp, false /* batch processing does not currently use idempotency keys */); err != nil { return err } } diff --git a/internal/privatemessaging/privatemessaging_test.go b/internal/privatemessaging/privatemessaging_test.go index ea994a2ea7..731ef83cf6 100644 --- a/internal/privatemessaging/privatemessaging_test.go +++ b/internal/privatemessaging/privatemessaging_test.go @@ -215,16 +215,16 @@ func TestDispatchBatchWithBlobs(t *testing.T) { } data := op.Data.(transferBlobData) return *data.Node.ID == *node2.ID - })).Return(nil, nil) + }), false).Return(nil, nil) mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *core.PreparedOperation) bool { if op.Type != core.OpTypeDataExchangeSendBatch { return false } data := op.Data.(batchSendData) return *data.Node.ID == *node2.ID - 
})).Return(nil, nil) + }), false).Return(nil, nil) - mmp.On("SubmitBatchPin", pm.ctx, mock.Anything, mock.Anything, "").Return(nil) + mmp.On("SubmitBatchPin", pm.ctx, mock.Anything, mock.Anything, "", false).Return(nil) err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchPayload{ Batch: core.BatchPersisted{ @@ -438,7 +438,7 @@ func TestSendSubmitBlobTransferFail(t *testing.T) { mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *core.PreparedOperation) bool { data := op.Data.(transferBlobData) return op.Type == core.OpTypeDataExchangeSendBlob && *data.Node.ID == *node2.ID - })).Return(nil, fmt.Errorf("pop")) + }), false).Return(nil, fmt.Errorf("pop")) err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchPayload{ Batch: core.BatchPersisted{ @@ -496,14 +496,14 @@ func TestWriteTransactionSubmitBatchPinFail(t *testing.T) { } data := op.Data.(transferBlobData) return *data.Node.ID == *node2.ID - })).Return(nil, nil) + }), false).Return(nil, nil) mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *core.PreparedOperation) bool { if op.Type != core.OpTypeDataExchangeSendBatch { return false } data := op.Data.(batchSendData) return *data.Node.ID == *node2.ID - })).Return(nil, nil) + }), false).Return(nil, nil) mdi.On("GetBlobs", pm.ctx, "ns1", mock.Anything).Return([]*core.Blob{{ Hash: blob1, @@ -511,7 +511,7 @@ func TestWriteTransactionSubmitBatchPinFail(t *testing.T) { }}, nil, nil) mmp := pm.multiparty.(*multipartymocks.Manager) - mmp.On("SubmitBatchPin", pm.ctx, mock.Anything, mock.Anything, "").Return(fmt.Errorf("pop")) + mmp.On("SubmitBatchPin", pm.ctx, mock.Anything, mock.Anything, "", false).Return(fmt.Errorf("pop")) err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchPayload{ Batch: core.BatchPersisted{ diff --git a/internal/privatemessaging/recipients.go b/internal/privatemessaging/recipients.go index b4b562d2b3..4c4c1a882a 100644 --- a/internal/privatemessaging/recipients.go +++ b/internal/privatemessaging/recipients.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
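Because the operations manager's `RunOperation` (and the multiparty manager's `SubmitBatchPin`) now take a trailing idempotent-submit flag, every mock expectation has to match that extra argument, as the test updates above do. A minimal, fragmentary testify expectation in the style of those tests (`mom`, `pm` and the matcher body follow the surrounding test code; this is not standalone runnable):

```go
// The new trailing argument must be matched explicitly (false here, since
// batch processing does not currently use idempotency keys); otherwise
// testify cannot find a matching expectation for the call.
mom := pm.operations.(*operationmocks.Manager)
mom.On("RunOperation", pm.ctx,
	mock.MatchedBy(func(op *core.PreparedOperation) bool {
		return op.Type == core.OpTypeDataExchangeSendBatch
	}),
	false, // idempotentSubmit
).Return(nil, nil)
```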
// // SPDX-License-Identifier: Apache-2.0 // @@ -108,7 +108,7 @@ func (pm *privateMessaging) resolveNode(ctx context.Context, identity *core.Iden func (pm *privateMessaging) getRecipients(ctx context.Context, in *core.MessageInOut) (gi *core.GroupIdentity, err error) { - localOrg, err := pm.identity.GetMultipartyRootOrg(ctx) + localOrg, err := pm.identity.GetRootOrg(ctx) if err != nil { return nil, err } diff --git a/internal/privatemessaging/recipients_test.go b/internal/privatemessaging/recipients_test.go index 77e2471303..d8d97af30d 100644 --- a/internal/privatemessaging/recipients_test.go +++ b/internal/privatemessaging/recipients_test.go @@ -52,7 +52,7 @@ func TestResolveMemberListNewGroupE2E(t *testing.T) { mim.On("CachedIdentityLookupMustExist", pm.ctx, remoteOrg.DID).Return(remoteOrg, false, nil) mim.On("CachedIdentityLookupByID", pm.ctx, localNode.ID).Return(localNode, nil) mim.On("CachedIdentityLookupByID", pm.ctx, remoteNode.ID).Return(remoteNode, nil) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(localNode, nil) mim.On("ValidateNodeOwner", pm.ctx, localNode, localOrg).Return(true, nil) mim.On("ValidateNodeOwner", pm.ctx, remoteNode, remoteOrg).Return(true, nil) @@ -125,7 +125,7 @@ func TestResolveMemberListExistingGroup(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, "ns1", mock.Anything, mock.Anything).Return(&core.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mim := pm.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupMustExist", pm.ctx, "org1").Return(localOrg, false, nil) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(localNode, nil) err := pm.resolveRecipientList(pm.ctx, &core.MessageInOut{ @@ -159,7 +159,7 @@ func TestResolveMemberListLookupFail(t *testing.T) { mim := pm.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupMustExist", pm.ctx, "org1").Return(nil, true, fmt.Errorf("pop")) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(localNode, nil) err := pm.resolveRecipientList(pm.ctx, &core.MessageInOut{ @@ -195,7 +195,7 @@ func TestResolveMemberListGetGroupsFail(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, "ns1", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) mim := pm.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupMustExist", pm.ctx, "org1").Return(localOrg, false, nil) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(localNode, nil) err := pm.resolveRecipientList(pm.ctx, &core.MessageInOut{ @@ -226,7 +226,7 @@ func TestResolveMemberListLocalOrgUnregistered(t *testing.T) { defer cancel() mim := pm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(nil, fmt.Errorf("pop")) + mim.On("GetRootOrg", pm.ctx).Return(nil, fmt.Errorf("pop")) err := pm.resolveRecipientList(pm.ctx, &core.MessageInOut{ Message: core.Message{ @@ -257,7 +257,7 @@ func TestResolveMemberListLocalMemberLookupFailed(t *testing.T) { localOrg := newTestOrg("localorg") mim := pm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", 
pm.ctx).Return(nil, fmt.Errorf("pop")) err := pm.resolveRecipientList(pm.ctx, &core.MessageInOut{ @@ -294,7 +294,7 @@ func TestResolveMemberListNodeNotFound(t *testing.T) { mim := pm.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupMustExist", pm.ctx, "org1").Return(localOrg, false, nil) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(localOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(localOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(localNode, nil) err := pm.resolveRecipientList(pm.ctx, &core.MessageInOut{ @@ -333,7 +333,7 @@ func TestResolveMemberNodeOwnedParentOrg(t *testing.T) { mdi.On("GetIdentities", pm.ctx, "ns1", mock.Anything).Return([]*core.Identity{localNode}, nil, nil) mdi.On("GetGroupByHash", pm.ctx, "ns1", mock.Anything, mock.Anything).Return(&core.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mim := pm.identity.(*identitymanagermocks.Manager) - mim.On("GetMultipartyRootOrg", pm.ctx).Return(parentOrg, nil) + mim.On("GetRootOrg", pm.ctx).Return(parentOrg, nil) mim.On("GetLocalNode", pm.ctx).Return(localNode, nil) mim.On("CachedIdentityLookupMustExist", pm.ctx, "org1").Return(childOrg, false, nil) mim.On("CachedIdentityLookupByID", pm.ctx, parentOrg.ID).Return(parentOrg, nil) diff --git a/internal/reference/reference.go b/internal/reference/reference.go index 7a7db397ec..19432f5c27 100644 --- a/internal/reference/reference.go +++ b/internal/reference/reference.go @@ -404,7 +404,7 @@ func GenerateObjectsReferenceMarkdown(ctx context.Context) (map[string][]byte, e Locator: "address=0x056df1c53c3c00b0e13d37543f46930b42f71db0&schema=ERC20WithData&type=fungible", Decimals: 18, Connector: "erc20_erc721", - State: core.TokenPoolStateConfirmed, + Active: true, Message: fftypes.MustParseUUID("43923040-b1e5-4164-aa20-47636c7177ee"), Info: fftypes.JSONObject{ "address": "0x056df1c53c3c00b0e13d37543f46930b42f71db0", diff --git a/internal/shareddownload/download_manager.go b/internal/shareddownload/download_manager.go index fcfb8aa7a4..56cf25877d 100644 --- a/internal/shareddownload/download_manager.go +++ b/internal/shareddownload/download_manager.go @@ -39,8 +39,8 @@ type Manager interface { Start() error WaitStop() - InitiateDownloadBatch(ctx context.Context, tx *fftypes.UUID, payloadRef string) error - InitiateDownloadBlob(ctx context.Context, tx *fftypes.UUID, dataID *fftypes.UUID, payloadRef string) error + InitiateDownloadBatch(ctx context.Context, tx *fftypes.UUID, payloadRef string, idempotentSubmit bool) error + InitiateDownloadBlob(ctx context.Context, tx *fftypes.UUID, dataID *fftypes.UUID, payloadRef string, idempotentSubmit bool) error } // downloadManager operates a number of workers that can perform downloads/retries. 
Each download @@ -69,9 +69,10 @@ type downloadManager struct { } type downloadWork struct { - dispatchedAt time.Time - preparedOp *core.PreparedOperation - attempts int + dispatchedAt time.Time + preparedOp *core.PreparedOperation + attempts int + idempotentSubmit bool } type Callbacks interface { @@ -224,25 +225,26 @@ func (dm *downloadManager) waitAndRetryDownload(work *downloadWork) { dm.dispatchWork(work) } -func (dm *downloadManager) InitiateDownloadBatch(ctx context.Context, tx *fftypes.UUID, payloadRef string) error { +func (dm *downloadManager) InitiateDownloadBatch(ctx context.Context, tx *fftypes.UUID, payloadRef string, idempotentSubmit bool) error { op := core.NewOperation(dm.sharedstorage, dm.namespace.Name, tx, core.OpTypeSharedStorageDownloadBatch) addDownloadBatchInputs(op, payloadRef) - return dm.createAndDispatchOp(ctx, op, opDownloadBatch(op, payloadRef)) + return dm.createAndDispatchOp(ctx, op, opDownloadBatch(op, payloadRef), idempotentSubmit) } -func (dm *downloadManager) InitiateDownloadBlob(ctx context.Context, tx *fftypes.UUID, dataID *fftypes.UUID, payloadRef string) error { +func (dm *downloadManager) InitiateDownloadBlob(ctx context.Context, tx *fftypes.UUID, dataID *fftypes.UUID, payloadRef string, idempotentSubmit bool) error { op := core.NewOperation(dm.sharedstorage, dm.namespace.Name, tx, core.OpTypeSharedStorageDownloadBlob) addDownloadBlobInputs(op, dataID, payloadRef) - return dm.createAndDispatchOp(ctx, op, opDownloadBlob(op, dataID, payloadRef)) + return dm.createAndDispatchOp(ctx, op, opDownloadBlob(op, dataID, payloadRef), idempotentSubmit) } -func (dm *downloadManager) createAndDispatchOp(ctx context.Context, op *core.Operation, preparedOp *core.PreparedOperation) error { +func (dm *downloadManager) createAndDispatchOp(ctx context.Context, op *core.Operation, preparedOp *core.PreparedOperation, idempotentSubmit bool) error { err := dm.operations.AddOrReuseOperation(ctx, op, func() { // Use a closure hook to dispatch the work once the operation is successfully in the DB. // Note we have crash recovery of pending operations on startup. 
dm.dispatchWork(&downloadWork{ - dispatchedAt: time.Now(), - preparedOp: preparedOp, + dispatchedAt: time.Now(), + preparedOp: preparedOp, + idempotentSubmit: idempotentSubmit, }) }) if err != nil { diff --git a/internal/shareddownload/download_manager_test.go b/internal/shareddownload/download_manager_test.go index f7d794da10..fd217ec9a0 100644 --- a/internal/shareddownload/download_manager_test.go +++ b/internal/shareddownload/download_manager_test.go @@ -102,17 +102,17 @@ func TestDownloadBatchE2EOk(t *testing.T) { assert.Equal(t, "ref1", op.Data.(downloadBatchData).PayloadRef) return true }), mock.Anything).Return(nil, nil).Run(func(args mock.Arguments) { - output, complete, err := dm.RunOperation(args[0].(context.Context), args[1].(*core.PreparedOperation)) + output, phase, err := dm.RunOperation(args[0].(context.Context), args[1].(*core.PreparedOperation)) assert.NoError(t, err) assert.Equal(t, fftypes.JSONObject{"batch": batchID}, output) - assert.True(t, complete) + assert.Equal(t, core.OpPhaseComplete, phase) close(called) }) mci := dm.callbacks.(*shareddownloadmocks.Callbacks) mci.On("SharedStorageBatchDownloaded", "ref1", []byte("some batch data")).Return(batchID, nil) - err := dm.InitiateDownloadBatch(dm.ctx, txID, "ref1") + err := dm.InitiateDownloadBatch(dm.ctx, txID, "ref1", false) assert.NoError(t, err) <-called @@ -169,21 +169,21 @@ func TestDownloadBlobWithRetryOk(t *testing.T) { assert.Equal(t, "ref1", op.Data.(downloadBlobData).PayloadRef) return true }), mock.Anything).Return(nil, nil).Run(func(args mock.Arguments) { - output, complete, err := dm.RunOperation(args[0].(context.Context), args[1].(*core.PreparedOperation)) + output, phase, err := dm.RunOperation(args[0].(context.Context), args[1].(*core.PreparedOperation)) assert.NoError(t, err) assert.Equal(t, fftypes.JSONObject{ "dxPayloadRef": "privateRef1", "hash": blobHash, "size": 12345, }.String(), output.String()) - assert.True(t, complete) + assert.Equal(t, core.OpPhaseComplete, phase) close(called) }).Once() mci := dm.callbacks.(*shareddownloadmocks.Callbacks) mci.On("SharedStorageBlobDownloaded", *blobHash, int64(12345), "privateRef1", dataID).Return(nil) - err := dm.InitiateDownloadBlob(dm.ctx, txID, dataID, "ref1") + err := dm.InitiateDownloadBlob(dm.ctx, txID, dataID, "ref1", false) assert.NoError(t, err) <-called @@ -209,7 +209,7 @@ func TestDownloadBlobInsertOpFail(t *testing.T) { mom := dm.operations.(*operationmocks.Manager) mom.On("AddOrReuseOperation", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - err := dm.InitiateDownloadBlob(dm.ctx, txID, dataID, "ref1") + err := dm.InitiateDownloadBlob(dm.ctx, txID, dataID, "ref1", false) assert.Regexp(t, "pop", err) mom.AssertExpectations(t) @@ -288,14 +288,15 @@ func TestDownloadManagerStartupRecoveryCombinations(t *testing.T) { mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *core.PreparedOperation) bool { return op.Type == core.OpTypeSharedStorageDownloadBatch && op.Data.(downloadBatchData).PayloadRef == "ref2" }), mock.Anything).Return(nil, nil).Run(func(args mock.Arguments) { - output, complete, err := dm.RunOperation(args[0].(context.Context), args[1].(*core.PreparedOperation)) + output, phase, err := dm.RunOperation(args[0].(context.Context), args[1].(*core.PreparedOperation)) assert.NoError(t, err) assert.Equal(t, fftypes.JSONObject{ "batch": batchID, }.String(), output.String()) - assert.True(t, complete) + assert.Equal(t, core.OpPhaseComplete, phase) called <- true }) + mom.On("SubmitOperationUpdate", 
mock.Anything).Return(nil) mci := dm.callbacks.(*shareddownloadmocks.Callbacks) mci.On("SharedStorageBatchDownloaded", "ref2", []byte("some batch data")).Return(batchID, nil) diff --git a/internal/shareddownload/download_worker.go b/internal/shareddownload/download_worker.go index ab5f86eab9..cae26e501c 100644 --- a/internal/shareddownload/download_worker.go +++ b/internal/shareddownload/download_worker.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -21,7 +21,7 @@ import ( "fmt" "github.com/hyperledger/firefly-common/pkg/log" - "github.com/hyperledger/firefly/internal/operations" + "github.com/hyperledger/firefly/pkg/core" ) type downloadWorker struct { @@ -59,15 +59,17 @@ func (dw *downloadWorker) attemptWork(work *downloadWork) { work.attempts++ isLastAttempt := work.attempts >= dw.dm.retryMaxAttempts - options := []operations.RunOperationOption{operations.RemainPendingOnFailure} - if isLastAttempt { - options = []operations.RunOperationOption{} - } - - _, err := dw.dm.operations.RunOperation(dw.ctx, work.preparedOp, options...) + _, err := dw.dm.operations.RunOperation(dw.ctx, work.preparedOp, work.idempotentSubmit) if err != nil { log.L(dw.ctx).Errorf("Download operation %s/%s attempt=%d/%d failed: %s", work.preparedOp.Type, work.preparedOp.ID, work.attempts, dw.dm.retryMaxAttempts, err) - if !isLastAttempt { + if isLastAttempt { + dw.dm.operations.SubmitOperationUpdate(&core.OperationUpdate{ + NamespacedOpID: work.preparedOp.NamespacedIDString(), + Plugin: work.preparedOp.Plugin, + Status: core.OpStatusFailed, + ErrorMessage: err.Error(), + }) + } else { go dw.dm.waitAndRetryDownload(work) } } diff --git a/internal/shareddownload/operations.go b/internal/shareddownload/operations.go index 2ee2150105..94c8f2f9f2 100644 --- a/internal/shareddownload/operations.go +++ b/internal/shareddownload/operations.go @@ -96,25 +96,25 @@ func (dm *downloadManager) PrepareOperation(ctx context.Context, op *core.Operat } } -func (dm *downloadManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { +func (dm *downloadManager) RunOperation(ctx context.Context, op *core.PreparedOperation) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { switch data := op.Data.(type) { case downloadBatchData: return dm.downloadBatch(ctx, data) case downloadBlobData: return dm.downloadBlob(ctx, data) default: - return nil, false, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) + return nil, core.OpPhaseInitializing, i18n.NewError(ctx, coremsgs.MsgOperationDataIncorrect, op.Data) } } // downloadBatch retrieves a serialized batch from public storage, then persists it and drives a rewind // on the messages included (just like the event driven when we receive data over DX). 
-func (dm *downloadManager) downloadBatch(ctx context.Context, data downloadBatchData) (outputs fftypes.JSONObject, complete bool, err error) { +func (dm *downloadManager) downloadBatch(ctx context.Context, data downloadBatchData) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { // Download into memory for batches reader, err := dm.sharedstorage.DownloadData(ctx, data.PayloadRef) if err != nil { - return nil, false, i18n.WrapError(ctx, err, coremsgs.MsgDownloadSharedFailed, data.PayloadRef) + return nil, core.OpPhaseInitializing, i18n.WrapError(ctx, err, coremsgs.MsgDownloadSharedFailed, data.PayloadRef) } defer reader.Close() @@ -123,42 +123,42 @@ func (dm *downloadManager) downloadBatch(ctx context.Context, data downloadBatch limitedReader := io.LimitReader(reader, maxReadLimit) batchBytes, err := io.ReadAll(limitedReader) if err != nil { - return nil, false, i18n.WrapError(ctx, err, coremsgs.MsgDownloadSharedFailed, data.PayloadRef) + return nil, core.OpPhasePending, i18n.WrapError(ctx, err, coremsgs.MsgDownloadSharedFailed, data.PayloadRef) } if len(batchBytes) == int(maxReadLimit) { - return nil, false, i18n.WrapError(ctx, err, coremsgs.MsgDownloadBatchMaxBytes, data.PayloadRef) + return nil, core.OpPhasePending, i18n.WrapError(ctx, err, coremsgs.MsgDownloadBatchMaxBytes, data.PayloadRef) } // Parse and store the batch batchID, err := dm.callbacks.SharedStorageBatchDownloaded(data.PayloadRef, batchBytes) if err != nil { - return nil, false, err + return nil, core.OpPhasePending, err } - return getDownloadBatchOutputs(batchID), true, nil + return getDownloadBatchOutputs(batchID), core.OpPhaseComplete, nil } -func (dm *downloadManager) downloadBlob(ctx context.Context, data downloadBlobData) (outputs fftypes.JSONObject, complete bool, err error) { +func (dm *downloadManager) downloadBlob(ctx context.Context, data downloadBlobData) (outputs fftypes.JSONObject, phase core.OpPhase, err error) { // Stream from shared storage ... reader, err := dm.sharedstorage.DownloadData(ctx, data.PayloadRef) if err != nil { - return nil, false, err + return nil, core.OpPhasePending, err } defer reader.Close() // ... to data exchange dxPayloadRef, hash, blobSize, err := dm.dataexchange.UploadBlob(ctx, dm.namespace.NetworkName, *data.DataID, reader) if err != nil { - return nil, false, i18n.WrapError(ctx, err, coremsgs.MsgDownloadSharedFailed, data.PayloadRef) + return nil, core.OpPhasePending, i18n.WrapError(ctx, err, coremsgs.MsgDownloadSharedFailed, data.PayloadRef) } log.L(ctx).Infof("Transferred blob '%s' (%s) from shared storage '%s' to local data exchange '%s'", hash, units.HumanSizeWithPrecision(float64(blobSize), 2), data.PayloadRef, dxPayloadRef) // then callback to store metadata if err := dm.callbacks.SharedStorageBlobDownloaded(*hash, blobSize, dxPayloadRef, data.DataID); err != nil { - return nil, false, err + return nil, core.OpPhasePending, err } - return getDownloadBlobOutputs(hash, blobSize, dxPayloadRef), true, nil + return getDownloadBlobOutputs(hash, blobSize, dxPayloadRef), core.OpPhaseComplete, nil } func (dm *downloadManager) OnOperationUpdate(ctx context.Context, op *core.Operation, update *core.OperationUpdate) error { diff --git a/internal/syncasync/sync_async_bridge.go b/internal/syncasync/sync_async_bridge.go index c559d074b4..6328ab9c25 100644 --- a/internal/syncasync/sync_async_bridge.go +++ b/internal/syncasync/sync_async_bridge.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
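For readability, here is the post-change form of the download worker hunk above, reassembled from its interleaved added and removed lines (no new logic): on the final failed attempt the worker now records a terminal operation update itself, instead of passing `RemainPendingOnFailure` options into `RunOperation`.

```go
func (dw *downloadWorker) attemptWork(work *downloadWork) {
	work.attempts++
	isLastAttempt := work.attempts >= dw.dm.retryMaxAttempts

	_, err := dw.dm.operations.RunOperation(dw.ctx, work.preparedOp, work.idempotentSubmit)
	if err != nil {
		log.L(dw.ctx).Errorf("Download operation %s/%s attempt=%d/%d failed: %s",
			work.preparedOp.Type, work.preparedOp.ID, work.attempts, dw.dm.retryMaxAttempts, err)
		if isLastAttempt {
			// Retries exhausted: record a terminal failure so the operation
			// does not remain pending indefinitely.
			dw.dm.operations.SubmitOperationUpdate(&core.OperationUpdate{
				NamespacedOpID: work.preparedOp.NamespacedIDString(),
				Plugin:         work.preparedOp.Plugin,
				Status:         core.OpStatusFailed,
				ErrorMessage:   err.Error(),
			})
		} else {
			go dw.dm.waitAndRetryDownload(work)
		}
	}
}
```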
// // SPDX-License-Identifier: Apache-2.0 // @@ -219,7 +219,7 @@ func (sa *syncAsyncBridge) getPoolFromMessage(msg *core.Message) (*core.TokenPoo if err != nil || data == nil { return nil, err } - var pool core.TokenPoolAnnouncement + var pool core.TokenPoolDefinition if err := json.Unmarshal(data.Value.Bytes(), &pool); err == nil { return pool.Pool, nil } diff --git a/internal/syncasync/sync_async_bridge_test.go b/internal/syncasync/sync_async_bridge_test.go index b7cb0d5151..2949d7460c 100644 --- a/internal/syncasync/sync_async_bridge_test.go +++ b/internal/syncasync/sync_async_bridge_test.go @@ -820,7 +820,7 @@ func TestAwaitTokenPoolConfirmationRejected(t *testing.T) { sa, cancel := newTestSyncAsyncBridge(t) defer cancel() - pool := &core.TokenPoolAnnouncement{ + pool := &core.TokenPoolDefinition{ Pool: &core.TokenPool{ ID: fftypes.NewUUID(), }, diff --git a/internal/tokens/fftokens/config.go b/internal/tokens/fftokens/config.go index 8140f12f9a..a2c42ea643 100644 --- a/internal/tokens/fftokens/config.go +++ b/internal/tokens/fftokens/config.go @@ -24,9 +24,17 @@ import ( ) const ( - FFTEventRetryInitialDelay = "eventRetry.initialDelay" - FFTEventRetryMaxDelay = "eventRetry.maxDelay" - FFTEventRetryFactor = "eventRetry.factor" + FFTEventRetryInitialDelay = "eventRetry.initialDelay" + FFTEventRetryMaxDelay = "eventRetry.maxDelay" + FFTEventRetryFactor = "eventRetry.factor" + FFTBackgroundStart = "backgroundStart.enabled" + FFTBackgroundStartInitialDelay = "backgroundStart.initialDelay" + FFTBackgroundStartMaxDelay = "backgroundStart.maxDelay" + FFTBackgroundStartFactor = "backgroundStart.factor" + + defaultBackgroundInitialDelay = "5s" + defaultBackgroundRetryFactor = 2.0 + defaultBackgroundMaxDelay = "1m" ) func (ft *FFTokens) InitConfig(config config.Section) { @@ -35,4 +43,8 @@ func (ft *FFTokens) InitConfig(config config.Section) { config.AddKnownKey(FFTEventRetryInitialDelay, 50*time.Millisecond) config.AddKnownKey(FFTEventRetryMaxDelay, 30*time.Second) config.AddKnownKey(FFTEventRetryFactor, 2.0) + config.AddKnownKey(FFTBackgroundStart, false) + config.AddKnownKey(FFTBackgroundStartInitialDelay, defaultBackgroundInitialDelay) + config.AddKnownKey(FFTBackgroundStartMaxDelay, defaultBackgroundMaxDelay) + config.AddKnownKey(FFTBackgroundStartFactor, defaultBackgroundRetryFactor) } diff --git a/internal/tokens/fftokens/fftokens.go b/internal/tokens/fftokens/fftokens.go index fa6d330473..1d8132f2d6 100644 --- a/internal/tokens/fftokens/fftokens.go +++ b/internal/tokens/fftokens/fftokens.go @@ -19,6 +19,9 @@ package fftokens import ( "context" "encoding/json" + "fmt" + "net/http" + "strings" "sync" "github.com/go-resty/resty/v2" @@ -37,15 +40,29 @@ import ( "github.com/hyperledger/firefly/pkg/tokens" ) +type ConflictError struct { + err error +} + +func (ie *ConflictError) Error() string { + return ie.err.Error() +} + +func (ie *ConflictError) IsConflictError() bool { + return true +} + type FFTokens struct { - ctx context.Context - cancelCtx context.CancelFunc - capabilities *tokens.Capabilities - callbacks callbacks - configuredName string - client *resty.Client - wsconn wsclient.WSClient - retry *retry.Retry + ctx context.Context + cancelCtx context.CancelFunc + capabilities *tokens.Capabilities + callbacks callbacks + configuredName string + client *resty.Client + wsconn wsclient.WSClient + retry *retry.Retry + backgroundRetry *retry.Retry + backgroundStart bool } type callbacks struct { @@ -71,12 +88,19 @@ func (cb *callbacks) OperationUpdate(ctx context.Context, nsOpID string, 
status } } -func (cb *callbacks) TokenPoolCreated(ctx context.Context, pool *tokens.TokenPool) error { - // Deliver token pool creation events to every handler - for _, handler := range cb.handlers { - if err := handler.TokenPoolCreated(ctx, cb.plugin, pool); err != nil { - return err +func (cb *callbacks) TokenPoolCreated(ctx context.Context, namespace string, pool *tokens.TokenPool) error { + if namespace == "" { + // Some pool creation subscriptions don't populate namespace, so deliver the event to every handler + for _, handler := range cb.handlers { + if err := handler.TokenPoolCreated(ctx, cb.plugin, pool); err != nil { + return err + } + } + } else { + if handler, ok := cb.handlers[namespace]; ok { + return handler.TokenPoolCreated(ctx, cb.plugin, pool) } + log.L(ctx).Errorf("No handler found for token pool event on namespace '%s'", namespace) } return nil } @@ -154,7 +178,12 @@ type activatePool struct { PoolData string `json:"poolData"` PoolLocator string `json:"poolLocator"` Config fftypes.JSONObject `json:"config"` - RequestID string `json:"requestId,omitempty"` +} + +type deactivatePool struct { + PoolData string `json:"poolData"` + PoolLocator string `json:"poolLocator"` + Config fftypes.JSONObject `json:"config"` } type tokenInterface struct { @@ -221,6 +250,23 @@ type tokenError struct { Message string `json:"message,omitempty"` } +func packPoolData(namespace string, id *fftypes.UUID) string { + if id == nil { + return namespace + } + return namespace + "|" + id.String() +} + +func unpackPoolData(ctx context.Context, data string) (namespace string, id *fftypes.UUID) { + pieces := strings.Split(data, "|") + if len(pieces) > 1 { + if id, err := fftypes.ParseUUID(ctx, pieces[1]); err == nil { + return pieces[0], id + } + } + return pieces[0], nil +} + func (ft *FFTokens) Name() string { return "fftokens" } @@ -239,12 +285,12 @@ func (ft *FFTokens) Init(ctx context.Context, cancelCtx context.CancelFunc, name if config.GetString(ffresty.HTTPConfigURL) == "" { return i18n.NewError(ctx, coremsgs.MsgMissingPluginConfig, "url", "tokens.fftokens") } - ft.client, err = ffresty.New(ft.ctx, config) - if err != nil { - return err - } wsConfig, err := wsclient.GenerateConfig(ctx, config) + if err == nil { + ft.client, err = ffresty.New(ft.ctx, config) + } + if err != nil { return err } @@ -264,6 +310,17 @@ func (ft *FFTokens) Init(ctx context.Context, cancelCtx context.CancelFunc, name Factor: config.GetFloat64(FFTEventRetryFactor), } + ft.backgroundStart = config.GetBool(FFTBackgroundStart) + + if ft.backgroundStart { + ft.backgroundRetry = &retry.Retry{ + InitialDelay: config.GetDuration(FFTBackgroundStartInitialDelay), + MaximumDelay: config.GetDuration(FFTBackgroundStartMaxDelay), + Factor: config.GetFloat64(FFTBackgroundStartFactor), + } + return nil + } + go ft.eventLoop() return nil @@ -272,16 +329,41 @@ func (ft *FFTokens) Init(ctx context.Context, cancelCtx context.CancelFunc, name func (ft *FFTokens) SetHandler(namespace string, handler tokens.Callbacks) { ft.callbacks.writeLock.Lock() defer ft.callbacks.writeLock.Unlock() - ft.callbacks.handlers[namespace] = handler + if handler == nil { + delete(ft.callbacks.handlers, namespace) + } else { + ft.callbacks.handlers[namespace] = handler + } } func (ft *FFTokens) SetOperationHandler(namespace string, handler core.OperationCallbacks) { ft.callbacks.writeLock.Lock() defer ft.callbacks.writeLock.Unlock() - ft.callbacks.opHandlers[namespace] = handler + if handler == nil { + delete(ft.callbacks.opHandlers, namespace) + } else { + 
ft.callbacks.opHandlers[namespace] = handler + } +} + +func (ft *FFTokens) backgroundStartLoop() { + _ = ft.backgroundRetry.Do(ft.ctx, fmt.Sprintf("Background start %s", ft.Name()), func(attempt int) (retry bool, err error) { + err = ft.wsconn.Connect() + if err != nil { + return true, err + } + + go ft.eventLoop() + + return false, nil + }) } func (ft *FFTokens) Start() error { + if ft.backgroundStart { + go ft.backgroundStartLoop() + return nil + } return ft.wsconn.Connect() } @@ -342,47 +424,51 @@ func (ft *FFTokens) buildBlockchainEvent(eventData fftypes.JSONObject) *blockcha return nil } -func (ft *FFTokens) handleTokenPoolCreate(ctx context.Context, data fftypes.JSONObject, poolData *tokenData) (err error) { +func (ft *FFTokens) handleTokenPoolCreate(ctx context.Context, eventData fftypes.JSONObject, txData *tokenData) (err error) { - tokenType := data.GetString("type") - poolLocator := data.GetString("poolLocator") + tokenType := eventData.GetString("type") + poolLocator := eventData.GetString("poolLocator") if tokenType == "" || poolLocator == "" { - log.L(ctx).Errorf("TokenPool event is not valid - missing data: %+v", data) + log.L(ctx).Errorf("TokenPool event is not valid - missing data: %+v", eventData) return nil // move on } // These fields are optional - standard := data.GetString("standard") - interfaceFormat := data.GetString("interfaceFormat") - symbol := data.GetString("symbol") - decimals := data.GetInt64("decimals") - info := data.GetObject("info") - blockchainEvent := data.GetObject("blockchain") - - poolDataString := data.GetString("data") - if poolData == nil { - poolData = &tokenData{} - if poolDataString != "" { + standard := eventData.GetString("standard") + interfaceFormat := eventData.GetString("interfaceFormat") + symbol := eventData.GetString("symbol") + decimals := eventData.GetInt64("decimals") + info := eventData.GetObject("info") + blockchainEvent := eventData.GetObject("blockchain") + poolData := eventData.GetString("poolData") + namespace, poolID := unpackPoolData(ctx, poolData) + + dataString := eventData.GetString("data") + if txData == nil { + txData = &tokenData{} + if dataString != "" { // We want to process all events, even those not initiated by FireFly. // The "data" argument is optional, so it's important not to fail if it's missing or malformed. - if err = json.Unmarshal([]byte(poolDataString), &poolData); err != nil { - log.L(ctx).Warnf("TokenPool event data could not be parsed - continuing anyway (%s): %+v", err, data) - poolData = &tokenData{} + if err = json.Unmarshal([]byte(dataString), &txData); err != nil { + log.L(ctx).Warnf("TokenPool event data could not be parsed - continuing anyway (%s): %+v", err, eventData) + txData = &tokenData{} } } } - txType := poolData.TXType + txType := txData.TXType if txType == "" { txType = core.TransactionTypeTokenPool } pool := &tokens.TokenPool{ + ID: poolID, Type: fftypes.FFEnum(tokenType), PoolLocator: poolLocator, + PluginData: poolData, TX: core.TransactionRef{ - ID: poolData.TX, + ID: txData.TX, Type: txType, }, Connector: ft.configuredName, @@ -395,18 +481,18 @@ func (ft *FFTokens) handleTokenPoolCreate(ctx context.Context, data fftypes.JSON } // If there's an error dispatching the event, we must return the error and shutdown - log.L(ctx).Debugf("Calling TokenPoolCreated callback. Locator='%s' TX=%s/%s", pool.PoolLocator, txType, poolData.TX) - return ft.callbacks.TokenPoolCreated(ctx, pool) + log.L(ctx).Debugf("Calling TokenPoolCreated callback. 
Locator='%s' TX=%s/%s", pool.PoolLocator, txType, txData.TX) + return ft.callbacks.TokenPoolCreated(ctx, namespace, pool) } -func (ft *FFTokens) handleTokenTransfer(ctx context.Context, t core.TokenTransferType, data fftypes.JSONObject) (err error) { - protocolID := data.GetString("id") - poolLocator := data.GetString("poolLocator") - signerAddress := data.GetString("signer") - fromAddress := data.GetString("from") - toAddress := data.GetString("to") - value := data.GetString("amount") - blockchainEvent := ft.buildBlockchainEvent(data.GetObject("blockchain")) +func (ft *FFTokens) handleTokenTransfer(ctx context.Context, t core.TokenTransferType, eventData fftypes.JSONObject) (err error) { + protocolID := eventData.GetString("id") + poolLocator := eventData.GetString("poolLocator") + signerAddress := eventData.GetString("signer") + fromAddress := eventData.GetString("from") + toAddress := eventData.GetString("to") + value := eventData.GetString("amount") + blockchainEvent := ft.buildBlockchainEvent(eventData.GetObject("blockchain")) if protocolID == "" || poolLocator == "" || @@ -414,28 +500,28 @@ func (ft *FFTokens) handleTokenTransfer(ctx context.Context, t core.TokenTransfe (t != core.TokenTransferTypeMint && fromAddress == "") || (t != core.TokenTransferTypeBurn && toAddress == "") || blockchainEvent == nil { - log.L(ctx).Errorf("%s event is not valid - missing data: %+v", t, data) + log.L(ctx).Errorf("%s event is not valid - missing data: %+v", t, eventData) return nil // move on } // These fields are optional - tokenIndex := data.GetString("tokenIndex") - uri := data.GetString("uri") - namespace := data.GetString("poolData") + tokenIndex := eventData.GetString("tokenIndex") + uri := eventData.GetString("uri") + namespace, poolID := unpackPoolData(ctx, eventData.GetString("poolData")) // We want to process all events, even those not initiated by FireFly. // The "data" argument is optional, so it's important not to fail if it's missing or malformed. 
- transferDataString := data.GetString("data") + transferDataString := eventData.GetString("data") var transferData tokenData if err = json.Unmarshal([]byte(transferDataString), &transferData); err != nil { - log.L(ctx).Infof("%s event data could not be parsed - continuing anyway (%s): %+v", t, err, data) + log.L(ctx).Infof("%s event data could not be parsed - continuing anyway (%s): %+v", t, err, eventData) transferData = tokenData{} } var amount fftypes.FFBigInt _, ok := amount.Int().SetString(value, 10) if !ok { - log.L(ctx).Errorf("%s event is not valid - invalid amount: %+v", t, data) + log.L(ctx).Errorf("%s event is not valid - invalid amount: %+v", t, eventData) return nil // move on } @@ -448,6 +534,7 @@ func (ft *FFTokens) handleTokenTransfer(ctx context.Context, t core.TokenTransfe PoolLocator: poolLocator, TokenTransfer: core.TokenTransfer{ Type: t, + Pool: poolID, TokenIndex: tokenIndex, URI: uri, Connector: ft.configuredName, @@ -470,34 +557,34 @@ func (ft *FFTokens) handleTokenTransfer(ctx context.Context, t core.TokenTransfe return ft.callbacks.TokensTransferred(ctx, namespace, transfer) } -func (ft *FFTokens) handleTokenApproval(ctx context.Context, data fftypes.JSONObject) (err error) { - protocolID := data.GetString("id") - subject := data.GetString("subject") - signerAddress := data.GetString("signer") - poolLocator := data.GetString("poolLocator") - operatorAddress := data.GetString("operator") - approved := data.GetBool("approved") - blockchainEvent := ft.buildBlockchainEvent(data.GetObject("blockchain")) +func (ft *FFTokens) handleTokenApproval(ctx context.Context, eventData fftypes.JSONObject) (err error) { + protocolID := eventData.GetString("id") + subject := eventData.GetString("subject") + signerAddress := eventData.GetString("signer") + poolLocator := eventData.GetString("poolLocator") + operatorAddress := eventData.GetString("operator") + approved := eventData.GetBool("approved") + blockchainEvent := ft.buildBlockchainEvent(eventData.GetObject("blockchain")) if protocolID == "" || subject == "" || poolLocator == "" || operatorAddress == "" || blockchainEvent == nil { - log.L(ctx).Errorf("Approval event is not valid - missing data: %+v", data) + log.L(ctx).Errorf("Approval event is not valid - missing data: %+v", eventData) return nil // move on } // These fields are optional - info := data.GetObject("info") - namespace := data.GetString("poolData") + info := eventData.GetObject("info") + namespace, poolID := unpackPoolData(ctx, eventData.GetString("poolData")) // We want to process all events, even those not initiated by FireFly. // The "data" argument is optional, so it's important not to fail if it's missing or malformed. 
- approvalDataString := data.GetString("data") + approvalDataString := eventData.GetString("data") var approvalData tokenData if err = json.Unmarshal([]byte(approvalDataString), &approvalData); err != nil { - log.L(ctx).Infof("TokenApproval event data could not be parsed - continuing anyway (%s): %+v", err, data) + log.L(ctx).Infof("TokenApproval event data could not be parsed - continuing anyway (%s): %+v", err, eventData) approvalData = tokenData{} } @@ -510,6 +597,7 @@ func (ft *FFTokens) handleTokenApproval(ctx context.Context, data fftypes.JSONOb PoolLocator: poolLocator, TokenApproval: core.TokenApproval{ Connector: ft.configuredName, + Pool: poolID, Key: signerAddress, Operator: operatorAddress, Approved: approved, @@ -615,16 +703,23 @@ func (ft *FFTokens) eventLoop() { // // "Bad Request: Field 'x' is required" func wrapError(ctx context.Context, errRes *tokenError, res *resty.Response, err error) error { - if errRes != nil && errRes.Message != "" { + if errRes != nil && (errRes.Message != "" || errRes.Error != "") { + errMsgFromBody := errRes.Message if errRes.Error != "" { - return i18n.WrapError(ctx, err, coremsgs.MsgTokensRESTErr, errRes.Error+": "+errRes.Message) + errMsgFromBody = errRes.Error + ": " + errRes.Message + } + if res != nil && res.StatusCode() == http.StatusConflict { + return &ConflictError{err: i18n.WrapError(ctx, err, coremsgs.MsgTokensRESTErrConflict, errMsgFromBody)} } - return i18n.WrapError(ctx, err, coremsgs.MsgTokensRESTErr, errRes.Message) + err = i18n.WrapError(ctx, err, coremsgs.MsgTokensRESTErr, errMsgFromBody) + } + if res != nil && res.StatusCode() == http.StatusConflict { + return &ConflictError{err: ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgTokensRESTErrConflict)} } return ffresty.WrapRestErr(ctx, res, err, coremsgs.MsgTokensRESTErr) } -func (ft *FFTokens) CreateTokenPool(ctx context.Context, nsOpID string, pool *core.TokenPool) (complete bool, err error) { +func (ft *FFTokens) CreateTokenPool(ctx context.Context, nsOpID string, pool *core.TokenPool) (phase core.OpPhase, err error) { tokenData := &tokenData{ TX: pool.TX.ID, TXType: pool.TX.Type, @@ -644,51 +739,67 @@ func (ft *FFTokens) CreateTokenPool(ctx context.Context, nsOpID string, pool *co SetError(&errRes). Post("/api/v1/createpool") if err != nil || !res.IsSuccess() { - return false, wrapError(ctx, &errRes, res, err) + return core.OpPhaseInitializing, wrapError(ctx, &errRes, res, err) } if res.StatusCode() == 200 { // HTTP 200: Creation was successful, and pool details are in response body var obj fftypes.JSONObject if err := json.Unmarshal(res.Body(), &obj); err != nil { - return false, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, res.Body()) + return core.OpPhaseComplete, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, res.Body()) } - return true, ft.handleTokenPoolCreate(ctx, obj, tokenData) + obj["poolData"] = packPoolData(pool.Namespace, pool.ID) + return core.OpPhaseComplete, ft.handleTokenPoolCreate(ctx, obj, tokenData) } // Default (HTTP 202): Request was accepted, and success/failure status will be delivered via websocket - return false, nil + return core.OpPhasePending, nil } -func (ft *FFTokens) ActivateTokenPool(ctx context.Context, nsOpID string, pool *core.TokenPool) (complete bool, err error) { +func (ft *FFTokens) ActivateTokenPool(ctx context.Context, pool *core.TokenPool) (phase core.OpPhase, err error) { var errRes tokenError res, err := ft.client.R().SetContext(ctx). 
SetBody(&activatePool{ - RequestID: nsOpID, - PoolData: pool.Namespace, + PoolData: packPoolData(pool.Namespace, pool.ID), PoolLocator: pool.Locator, Config: pool.Config, }). SetError(&errRes). Post("/api/v1/activatepool") if err != nil || !res.IsSuccess() { - return false, wrapError(ctx, &errRes, res, err) + return core.OpPhaseInitializing, wrapError(ctx, &errRes, res, err) } if res.StatusCode() == 200 { // HTTP 200: Activation was successful, and pool details are in response body var obj fftypes.JSONObject if err := json.Unmarshal(res.Body(), &obj); err != nil { - return false, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, res.Body()) + return core.OpPhaseComplete, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, res.Body()) } - return true, ft.handleTokenPoolCreate(ctx, obj, &tokenData{ + return core.OpPhaseComplete, ft.handleTokenPoolCreate(ctx, obj, &tokenData{ TX: pool.TX.ID, TXType: pool.TX.Type, }) } else if res.StatusCode() == 204 { // HTTP 204: Activation was successful, but pool details are not available // This will resolve the operation, but connector is responsible for re-delivering pool details on the websocket. - return true, nil + return core.OpPhaseComplete, nil } // Default (HTTP 202): Request was accepted, and success/failure status will be delivered via websocket - return false, nil + return core.OpPhasePending, nil +} + +func (ft *FFTokens) DeactivateTokenPool(ctx context.Context, pool *core.TokenPool) error { + var errRes tokenError + res, err := ft.client.R().SetContext(ctx). + SetBody(&deactivatePool{ + PoolData: pool.PluginData, + PoolLocator: pool.Locator, + Config: pool.Config, + }). + SetError(&errRes). + Post("/api/v1/deactivatepool") + if err == nil && (res.IsSuccess() || res.StatusCode() == 404) { + return nil + } + return wrapError(ctx, &errRes, res, err) } func (ft *FFTokens) prepareABI(ctx context.Context, methods []*fftypes.FFIMethod) ([]*abi.Entry, error) { diff --git a/internal/tokens/fftokens/fftokens_test.go b/internal/tokens/fftokens/fftokens_test.go index 7852779b30..da03d619a4 100644 --- a/internal/tokens/fftokens/fftokens_test.go +++ b/internal/tokens/fftokens/fftokens_test.go @@ -23,9 +23,11 @@ import ( "fmt" "io/ioutil" "net/http" + "net/http/httptest" "net/url" "testing" + "github.com/go-resty/resty/v2" "github.com/hyperledger/firefly-common/pkg/config" "github.com/hyperledger/firefly-common/pkg/ffresty" "github.com/hyperledger/firefly-common/pkg/fftls" @@ -33,6 +35,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/retry" "github.com/hyperledger/firefly-common/pkg/wsclient" "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/mocks/coremocks" "github.com/hyperledger/firefly/mocks/tokenmocks" "github.com/hyperledger/firefly/mocks/wsmocks" @@ -75,6 +78,23 @@ func newTestFFTokens(t *testing.T) (h *FFTokens, toServer, fromServer chan strin } } +func TestUnsetHandler(t *testing.T) { + h, _, _, _, done := newTestFFTokens(t) + defer done() + mcb1 := &tokenmocks.Callbacks{} + h.SetHandler("ns1", mcb1) + mocb1 := &coremocks.OperationCallbacks{} + h.SetOperationHandler("ns1", mocb1) + assert.Equal(t, 1, len(h.callbacks.handlers)) + assert.Equal(t, 1, len(h.callbacks.opHandlers)) + h.SetHandler("ns1", nil) + assert.Empty(t, h.callbacks.handlers) + h.SetOperationHandler("ns1", nil) + assert.Empty(t, h.callbacks.opHandlers) + assert.Equal(t, 0, len(h.callbacks.handlers)) + assert.Equal(t, 0, len(h.callbacks.opHandlers)) +} + func TestInitBadURL(t 
*testing.T) { coreconfig.Reset() h := &FFTokens{} @@ -87,6 +107,20 @@ func TestInitBadURL(t *testing.T) { assert.Regexp(t, "FF00149", err) } +func TestInitBackgroundStart1(t *testing.T) { + coreconfig.Reset() + h := &FFTokens{} + h.InitConfig(ffTokensConfig) + + ffTokensConfig.AddKnownKey(ffresty.HTTPConfigURL, "http://localhost:8080") + ffTokensConfig.Set(FFTBackgroundStart, true) + + ctx, cancelCtx := context.WithCancel(context.Background()) + err := h.Init(ctx, cancelCtx, "testtokens", ffTokensConfig) + assert.NoError(t, err) + assert.NotNil(t, h.backgroundRetry) +} + func TestInitBadTLS(t *testing.T) { coreconfig.Reset() h := &FFTokens{} @@ -164,8 +198,8 @@ func TestCreateTokenPool(t *testing.T) { return res, nil }) - complete, err := h.CreateTokenPool(context.Background(), nsOpID, pool) - assert.False(t, complete) + phase, err := h.CreateTokenPool(context.Background(), nsOpID, pool) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) } @@ -188,8 +222,8 @@ func TestCreateTokenPoolError(t *testing.T) { })) nsOpID := "ns1:" + fftypes.NewUUID().String() - complete, err := h.CreateTokenPool(context.Background(), nsOpID, pool) - assert.False(t, complete) + phase, err := h.CreateTokenPool(context.Background(), nsOpID, pool) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10274.*Bad Request: Missing required field", err) } @@ -211,8 +245,8 @@ func TestCreateTokenPoolErrorMessageOnly(t *testing.T) { })) nsOpID := "ns1:" + fftypes.NewUUID().String() - complete, err := h.CreateTokenPool(context.Background(), nsOpID, pool) - assert.False(t, complete) + phase, err := h.CreateTokenPool(context.Background(), nsOpID, pool) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10274.*Missing required field", err) } @@ -232,8 +266,8 @@ func TestCreateTokenPoolUnexpectedError(t *testing.T) { httpmock.NewStringResponder(400, "Failed miserably")) nsOpID := "ns1:" + fftypes.NewUUID().String() - complete, err := h.CreateTokenPool(context.Background(), nsOpID, pool) - assert.False(t, complete) + phase, err := h.CreateTokenPool(context.Background(), nsOpID, pool) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10274.*Failed miserably", err) } @@ -290,8 +324,8 @@ func TestCreateTokenPoolSynchronous(t *testing.T) { return p.PoolLocator == "F1" && p.Type == core.TokenTypeFungible && *p.TX.ID == *pool.TX.ID })).Return(nil) - complete, err := h.CreateTokenPool(ctx, nsOpID, pool) - assert.True(t, complete) + phase, err := h.CreateTokenPool(ctx, nsOpID, pool) + assert.Equal(t, core.OpPhaseComplete, phase) assert.NoError(t, err) } @@ -333,8 +367,8 @@ func TestCreateTokenPoolSynchronousBadResponse(t *testing.T) { return res, nil }) - complete, err := h.CreateTokenPool(context.Background(), nsOpID, pool) - assert.False(t, complete) + phase, err := h.CreateTokenPool(context.Background(), nsOpID, pool) + assert.Equal(t, core.OpPhaseComplete, phase) assert.Regexp(t, "FF00127", err) } @@ -342,8 +376,6 @@ func TestActivateTokenPool(t *testing.T) { h, _, _, httpURL, done := newTestFFTokens(t) defer done() - opID := fftypes.NewUUID() - nsOpID := "ns1:" + opID.String() poolConfig := map[string]interface{}{ "address": "0x12345", } @@ -360,7 +392,6 @@ func TestActivateTokenPool(t *testing.T) { assert.NoError(t, err) assert.Equal(t, fftypes.JSONObject{ "poolData": "ns1", - "requestId": "ns1:" + opID.String(), "poolLocator": "N1", "config": poolConfig, }, body) @@ -375,8 +406,8 @@ func TestActivateTokenPool(t *testing.T) { return res, nil }) - 
complete, err := h.ActivateTokenPool(context.Background(), nsOpID, pool) - assert.False(t, complete) + phase, err := h.ActivateTokenPool(context.Background(), pool) + assert.Equal(t, core.OpPhasePending, phase) assert.NoError(t, err) } @@ -395,9 +426,8 @@ func TestActivateTokenPoolError(t *testing.T) { httpmock.RegisterResponder("POST", fmt.Sprintf("%s/api/v1/activatepool", httpURL), httpmock.NewJsonResponderOrPanic(500, fftypes.JSONObject{})) - nsOpID := "ns1:" + fftypes.NewUUID().String() - complete, err := h.ActivateTokenPool(context.Background(), nsOpID, pool) - assert.False(t, complete) + phase, err := h.ActivateTokenPool(context.Background(), pool) + assert.Equal(t, core.OpPhaseInitializing, phase) assert.Regexp(t, "FF10274", err) } @@ -405,8 +435,6 @@ func TestActivateTokenPoolSynchronous(t *testing.T) { h, _, _, httpURL, done := newTestFFTokens(t) defer done() - opID := fftypes.NewUUID() - nsOpID := "ns1:" + opID.String() poolConfig := map[string]interface{}{ "foo": "bar", } @@ -423,7 +451,6 @@ func TestActivateTokenPoolSynchronous(t *testing.T) { assert.NoError(t, err) assert.Equal(t, fftypes.JSONObject{ "poolData": "ns1", - "requestId": "ns1:" + opID.String(), "poolLocator": "N1", "config": poolConfig, }, body) @@ -448,8 +475,8 @@ func TestActivateTokenPoolSynchronous(t *testing.T) { return p.PoolLocator == "F1" && p.Type == core.TokenTypeFungible && p.TX.ID == nil && p.Event == nil })).Return(nil) - complete, err := h.ActivateTokenPool(context.Background(), nsOpID, pool) - assert.True(t, complete) + phase, err := h.ActivateTokenPool(context.Background(), pool) + assert.Equal(t, core.OpPhaseComplete, phase) assert.NoError(t, err) } @@ -457,8 +484,6 @@ func TestActivateTokenPoolSynchronousBadResponse(t *testing.T) { h, _, _, httpURL, done := newTestFFTokens(t) defer done() - opID := fftypes.NewUUID() - nsOpID := "ns1:" + opID.String() poolConfig := map[string]interface{}{ "foo": "bar", } @@ -475,7 +500,6 @@ func TestActivateTokenPoolSynchronousBadResponse(t *testing.T) { assert.NoError(t, err) assert.Equal(t, fftypes.JSONObject{ "poolData": "ns1", - "requestId": "ns1:" + opID.String(), "poolLocator": "N1", "config": poolConfig, }, body) @@ -496,8 +520,8 @@ func TestActivateTokenPoolSynchronousBadResponse(t *testing.T) { return p.PoolLocator == "F1" && p.Type == core.TokenTypeFungible && p.TX.ID == nil })).Return(nil) - complete, err := h.ActivateTokenPool(context.Background(), nsOpID, pool) - assert.False(t, complete) + phase, err := h.ActivateTokenPool(context.Background(), pool) + assert.Equal(t, core.OpPhaseComplete, phase) assert.Regexp(t, "FF00127", err) } @@ -505,14 +529,13 @@ func TestActivateTokenPoolNoContent(t *testing.T) { h, _, _, httpURL, done := newTestFFTokens(t) defer done() - opID := fftypes.NewUUID() - nsOpID := "ns1:" + opID.String() poolConfig := map[string]interface{}{ "foo": "bar", } pool := &core.TokenPool{ Namespace: "ns1", Locator: "N1", + ID: fftypes.NewUUID(), Config: poolConfig, } @@ -522,8 +545,7 @@ func TestActivateTokenPoolNoContent(t *testing.T) { err := json.NewDecoder(req.Body).Decode(&body) assert.NoError(t, err) assert.Equal(t, fftypes.JSONObject{ - "poolData": "ns1", - "requestId": "ns1:" + opID.String(), + "poolData": "ns1|" + pool.ID.String(), "poolLocator": "N1", "config": poolConfig, }, body) @@ -534,11 +556,59 @@ func TestActivateTokenPoolNoContent(t *testing.T) { return res, nil }) - complete, err := h.ActivateTokenPool(context.Background(), nsOpID, pool) - assert.True(t, complete) + phase, err := h.ActivateTokenPool(context.Background(), 
pool) + assert.Equal(t, core.OpPhaseComplete, phase) assert.NoError(t, err) } +func TestDeactivateTokenPool(t *testing.T) { + h, _, _, httpURL, done := newTestFFTokens(t) + defer done() + + pool := &core.TokenPool{ + Namespace: "ns1", + Locator: "N1", + PluginData: "ns1|pool1", + } + + httpmock.RegisterResponder("POST", fmt.Sprintf("%s/api/v1/deactivatepool", httpURL), + func(req *http.Request) (*http.Response, error) { + body := make(fftypes.JSONObject) + err := json.NewDecoder(req.Body).Decode(&body) + assert.NoError(t, err) + assert.Equal(t, fftypes.JSONObject{ + "poolData": "ns1|pool1", + "poolLocator": "N1", + "config": nil, + }, body) + + res := &http.Response{ + StatusCode: 204, + } + return res, nil + }) + + err := h.DeactivateTokenPool(context.Background(), pool) + assert.NoError(t, err) +} + +func TestDeactivateTokenPoolFail(t *testing.T) { + h, _, _, httpURL, done := newTestFFTokens(t) + defer done() + + pool := &core.TokenPool{ + Namespace: "ns1", + Locator: "N1", + PluginData: "ns1|pool1", + } + + httpmock.RegisterResponder("POST", fmt.Sprintf("%s/api/v1/deactivatepool", httpURL), + httpmock.NewJsonResponderOrPanic(500, fftypes.JSONObject{})) + + err := h.DeactivateTokenPool(context.Background(), pool) + assert.Regexp(t, "FF10274", err) +} + func TestMintTokens(t *testing.T) { h, _, _, httpURL, done := newTestFFTokens(t) defer done() @@ -900,6 +970,139 @@ func TestIgnoredEvents(t *testing.T) { }.String() } +func TestBackgroundStartFailWS(t *testing.T) { + h := &FFTokens{} + h.InitConfig(ffTokensConfig) + + // Create a listener and close it - to grab a port we know is not in use + badListener := httptest.NewServer(&http.ServeMux{}) + badURL := badListener.URL + badListener.Close() + + // Bad url for WS should fail and retry + ffTokensConfig.AddKnownKey(ffresty.HTTPConfigURL, badURL) + ffTokensConfig.Set(FFTBackgroundStart, true) + ffTokensConfig.Set(wsclient.WSConfigKeyInitialConnectAttempts, 1) + + ctx, cancelCtx := context.WithCancel(context.Background()) + err := h.Init(ctx, cancelCtx, "testtokens", ffTokensConfig) + assert.NoError(t, err) + assert.NotNil(t, h.backgroundRetry) + + capturedErr := make(chan error) + h.backgroundRetry = &retry.Retry{ + ErrCallback: func(err error) { + capturedErr <- err + }, + } + + err = h.Start() + assert.NoError(t, err) + + err = <-capturedErr + assert.Regexp(t, "FF00148", err) +} + +func TestReceiptEventsBackgroundStart(t *testing.T) { + + h, _, fromServer, _, done := newTestFFTokens(t) + defer done() + + ffTokensConfig.Set(FFTBackgroundStart, true) + + err := h.Init(h.ctx, h.cancelCtx, "testtokens", ffTokensConfig) + assert.NoError(t, err) + + // Reset the retry to be quicker + h.backgroundRetry = &retry.Retry{} + + err = h.Start() + assert.NoError(t, err) + + mcb := &coremocks.OperationCallbacks{} + h.SetOperationHandler("ns1", mcb) + opID := fftypes.NewUUID() + mockCalled := make(chan bool) + + // receipt: bad ID - passed through + mcb.On("OperationUpdate", mock.MatchedBy(func(update *core.OperationUpdate) bool { + return update.NamespacedOpID == "ns1:wrong" && + update.Status == core.OpStatusPending && + update.Plugin == "fftokens" + })).Return(nil).Once().Run(func(args mock.Arguments) { mockCalled <- true }) + fromServer <- fftypes.JSONObject{ + "id": "3", + "event": "receipt", + "data": fftypes.JSONObject{ + "headers": fftypes.JSONObject{ + "requestId": "ns1:wrong", // passed through to OperationUpdate to ignore + "type": "TransactionUpdate", + }, + }, + }.String() + <-mockCalled + + // receipt: success + mcb.On("OperationUpdate", 
mock.MatchedBy(func(update *core.OperationUpdate) bool { + return update.NamespacedOpID == "ns1:"+opID.String() && + update.Status == core.OpStatusSucceeded && + update.BlockchainTXID == "0xffffeeee" && + update.Plugin == "fftokens" + })).Return(nil).Once().Run(func(args mock.Arguments) { mockCalled <- true }) + fromServer <- fftypes.JSONObject{ + "id": "4", + "event": "receipt", + "data": fftypes.JSONObject{ + "headers": fftypes.JSONObject{ + "requestId": "ns1:" + opID.String(), + "type": "TransactionSuccess", + }, + "transactionHash": "0xffffeeee", + }, + }.String() + <-mockCalled + + // receipt: update + mcb.On("OperationUpdate", mock.MatchedBy(func(update *core.OperationUpdate) bool { + return update.NamespacedOpID == "ns1:"+opID.String() && + update.Status == core.OpStatusPending && + update.BlockchainTXID == "0xffffeeee" + })).Return(nil).Once().Run(func(args mock.Arguments) { mockCalled <- true }) + fromServer <- fftypes.JSONObject{ + "id": "5", + "event": "receipt", + "data": fftypes.JSONObject{ + "headers": fftypes.JSONObject{ + "requestId": "ns1:" + opID.String(), + "type": "TransactionUpdate", + }, + "transactionHash": "0xffffeeee", + }, + }.String() + <-mockCalled + + // receipt: failure + mcb.On("OperationUpdate", mock.MatchedBy(func(update *core.OperationUpdate) bool { + return update.NamespacedOpID == "ns1:"+opID.String() && + update.Status == core.OpStatusFailed && + update.BlockchainTXID == "0xffffeeee" && + update.Plugin == "fftokens" + })).Return(nil).Once().Run(func(args mock.Arguments) { mockCalled <- true }) + fromServer <- fftypes.JSONObject{ + "id": "5", + "event": "receipt", + "data": fftypes.JSONObject{ + "headers": fftypes.JSONObject{ + "requestId": "ns1:" + opID.String(), + "type": "TransactionFailed", + }, + "transactionHash": "0xffffeeee", + }, + }.String() + <-mockCalled + + mcb.AssertExpectations(t) +} func TestReceiptEvents(t *testing.T) { h, _, fromServer, _, done := newTestFFTokens(t) defer done() @@ -1043,7 +1246,7 @@ func TestPoolEvents(t *testing.T) { "id": "8", "event": "token-pool", "data": fftypes.JSONObject{ - "id": "000000000010/000020/000030/000040", + "id": "000000000010/000020/000030/000041", "poolData": "ns1", "type": "fungible", "poolLocator": "F1", @@ -1060,7 +1263,27 @@ func TestPoolEvents(t *testing.T) { msg = <-toServer assert.Equal(t, `{"data":{"id":"8"},"event":"ack"}`, string(msg)) - // token-pool: batch + callback fail + // token-pool: no handler + fromServer <- fftypes.JSONObject{ + "id": "10", + "event": "token-pool", + "data": fftypes.JSONObject{ + "id": "000000000010/000020/000030/000042", + "poolData": "BAD-NAMESPACE", + "type": "fungible", + "poolLocator": "F1", + "signer": "0x0", + "data": fftypes.JSONObject{"tx": txID.String()}.String(), + "blockchain": fftypes.JSONObject{ + "id": "000000000010/000020/000030", + "info": fftypes.JSONObject{ + "transactionHash": "0xffffeeee", + }, + }, + }, + }.String() + + // token-pool: batch + callback fail (terminates loop) mcb.On("TokenPoolCreated", mock.Anything, h, mock.MatchedBy(func(p *tokens.TokenPool) bool { return p.PoolLocator == "F1" && p.Type == core.TokenTypeFungible && txID.Equals(p.TX.ID) && p.Event.ProtocolID == "000000000010/000020/000030" })).Return(fmt.Errorf("pop")).Once() @@ -1071,7 +1294,7 @@ func TestPoolEvents(t *testing.T) { "events": fftypes.JSONObjectArray{{ "event": "token-pool", "data": fftypes.JSONObject{ - "id": "000000000010/000020/000030/000040", + "id": "000000000010/000020/000030/000043", "type": "fungible", "poolLocator": "F1", "signer": "0x0", @@ -1114,6 
+1337,7 @@ func TestTransferEvents(t *testing.T) { "data": fftypes.JSONObject{ "id": "1.0.0", "poolLocator": "F1", + "poolData": "ns1|" + fftypes.NewUUID().String(), "tokenIndex": "0", "signer": "0x0", "to": "0x0", @@ -1138,7 +1362,7 @@ func TestTransferEvents(t *testing.T) { "event": "token-mint", "data": fftypes.JSONObject{ "id": "000000000010/000020/000030/000040", - "poolData": "ns1", + "poolData": "ns1|id1", "poolLocator": "F1", "signer": "0x0", "to": "0x0", @@ -1674,3 +1898,31 @@ func TestHandleEventRetryableFailure(t *testing.T) { assert.Regexp(t, "pop", err) assert.True(t, retry) } + +func TestErrorWrappingNoBodyError(t *testing.T) { + ctx := context.Background() + res := &resty.Response{ + RawResponse: &http.Response{StatusCode: 409}, + } + err := wrapError(ctx, nil, res, fmt.Errorf("pop")) + assert.Regexp(t, "FF10459", err) + assert.Regexp(t, "pop", err) + + errInterface, ok := err.(operations.ConflictError) + assert.True(t, ok) + assert.True(t, errInterface.IsConflictError()) +} + +func TestErrorWrappingBodyErr(t *testing.T) { + ctx := context.Background() + res := &resty.Response{ + RawResponse: &http.Response{StatusCode: 409}, + } + err := wrapError(ctx, &tokenError{Error: "snap"}, res, fmt.Errorf("pop")) + assert.Regexp(t, "FF10459", err) + assert.Regexp(t, "snap", err) + + errInterface, ok := err.(operations.ConflictError) + assert.True(t, ok) + assert.True(t, errInterface.IsConflictError()) +} diff --git a/internal/txcommon/token_inputs.go b/internal/txcommon/token_inputs.go index 9e10a1e926..f1ea41be43 100644 --- a/internal/txcommon/token_inputs.go +++ b/internal/txcommon/token_inputs.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // diff --git a/internal/txcommon/txcommon.go b/internal/txcommon/txcommon.go index 80783d5cf5..84c18ceb13 100644 --- a/internal/txcommon/txcommon.go +++ b/internal/txcommon/txcommon.go @@ -18,22 +18,28 @@ package txcommon import ( "context" + "database/sql/driver" "strings" "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly-common/pkg/log" "github.com/hyperledger/firefly/internal/cache" "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/internal/coremsgs" "github.com/hyperledger/firefly/internal/data" + "github.com/hyperledger/firefly/internal/database/sqlcommon" "github.com/hyperledger/firefly/pkg/core" "github.com/hyperledger/firefly/pkg/database" ) type Helper interface { SubmitNewTransaction(ctx context.Context, txType core.TransactionType, idempotencyKey core.IdempotencyKey) (*fftypes.UUID, error) + SubmitNewTransactionBatch(ctx context.Context, namespace string, batch []*BatchedTransactionInsert) error PersistTransaction(ctx context.Context, id *fftypes.UUID, txType core.TransactionType, blockchainTXID string) (valid bool, err error) AddBlockchainTX(ctx context.Context, tx *core.Transaction, blockchainTXID string) error InsertOrGetBlockchainEvent(ctx context.Context, event *core.BlockchainEvent) (existing *core.BlockchainEvent, err error) + InsertNewBlockchainEvents(ctx context.Context, events []*core.BlockchainEvent) (inserted []*core.BlockchainEvent, err error) GetTransactionByIDCached(ctx context.Context, id *fftypes.UUID) (*core.Transaction, error) GetBlockchainEventByIDCached(ctx context.Context, id *fftypes.UUID) (*core.BlockchainEvent, error) FindOperationInTransaction(ctx context.Context, tx *fftypes.UUID, opType core.OpType) 
(*core.Operation, error) @@ -47,6 +53,19 @@ type transactionHelper struct { blockchainEventCache cache.CInterface } +type BatchedTransactionInsert struct { + Input TransactionInsertInput + Output struct { + IdempotencyError error + Transaction *core.Transaction + } +} + +type TransactionInsertInput struct { + Type core.TransactionType + IdempotencyKey core.IdempotencyKey +} + func NewTransactionHelper(ctx context.Context, ns string, di database.Plugin, dm data.Manager, cacheManager cache.Manager) (Helper, error) { t := &transactionHelper{ namespace: ns, @@ -127,6 +146,98 @@ func (t *transactionHelper) SubmitNewTransaction(ctx context.Context, txType cor return tx.ID, nil } +// SubmitTransactionBatch is called to do a batch insertion of a set of transactions, and returns an array of the transaction +// result. Each is either a transaction, or an idempotency failure. The overall action fails for DB errors other than idempotency. +func (t *transactionHelper) SubmitNewTransactionBatch(ctx context.Context, namespace string, batch []*BatchedTransactionInsert) error { + + // Sort our transactions into those with/without idempotency keys, and do a pre-check for duplicate + // idempotency keys within the batch. + plainTxInserts := make([]*core.Transaction, 0, len(batch)) + idempotentTxInserts := make([]*core.Transaction, 0, len(batch)) + idempotencyKeyMap := make(map[core.IdempotencyKey]*BatchedTransactionInsert) + for _, t := range batch { + t.Output.Transaction = &core.Transaction{ + ID: fftypes.NewUUID(), + Namespace: namespace, + Type: t.Input.Type, + IdempotencyKey: t.Input.IdempotencyKey, + } + if t.Input.IdempotencyKey == "" { + plainTxInserts = append(plainTxInserts, t.Output.Transaction) + } else { + if existing := idempotencyKeyMap[t.Input.IdempotencyKey]; existing != nil { + // We've got the same idempotency key twice in our batch. Fail the second one as a dup of the first + log.L(ctx).Warnf("Idempotency key exists twice in insert batch '%s'", t.Input.IdempotencyKey) + t.Output.IdempotencyError = &sqlcommon.IdempotencyError{ + ExistingTXID: existing.Output.Transaction.ID, + OriginalError: i18n.NewError(ctx, coremsgs.MsgIdempotencyKeyDuplicateTransaction, t.Input.IdempotencyKey, existing.Output.Transaction.ID), + } + } else { + idempotencyKeyMap[t.Input.IdempotencyKey] = t + idempotentTxInserts = append(idempotentTxInserts, t.Output.Transaction) + } + } + } + + // First attempt to insert any transactions without an idempotency key. These should all work + if len(plainTxInserts) > 0 { + if insertErr := t.database.InsertTransactions(ctx, plainTxInserts); insertErr != nil { + return insertErr + } + } + + // Then attempt to insert all the transactions with idempotency keys, which might result in + // partial success. + if len(idempotentTxInserts) > 0 { + if insertErr := t.database.InsertTransactions(ctx, idempotentTxInserts); insertErr != nil { + // We have either an error, or a mixed result. Do a query to find all the idempotencyKeys. + // If we find them all, then we're good to continue, after we've used UUID comparison + // to check which idempotency keys clashed. + log.L(ctx).Warnf("Insert transaction batch failed. 
Checking for idempotencyKey duplicates: %s", insertErr) + idempotencyKeys := make([]driver.Value, len(idempotentTxInserts)) + for i := 0; i < len(idempotentTxInserts); i++ { + idempotencyKeys[i] = idempotentTxInserts[i].IdempotencyKey + } + fb := database.TransactionQueryFactory.NewFilter(ctx) + resolvedTxns, _, queryErr := t.database.GetTransactions(ctx, namespace, fb.In("idempotencykey", idempotencyKeys)) + if queryErr != nil { + log.L(ctx).Errorf("idempotencyKey duplicate check abandoned, due to query error (%s). Returning original insert err: %s", queryErr, insertErr) + return insertErr + } + if len(resolvedTxns) != len(idempotencyKeys) { + log.L(ctx).Errorf("idempotencyKey duplicate check abandoned, due to query not returning all transactions - len=%d, expected=%d. Returning original insert err: %s", len(resolvedTxns), len(idempotencyKeys), insertErr) + return insertErr + } + for _, resolvedTxn := range resolvedTxns { + // Processing above makes it safe for us to do this + expectedEntry := idempotencyKeyMap[resolvedTxn.IdempotencyKey] + if !resolvedTxn.ID.Equals(expectedEntry.Output.Transaction.ID) { + log.L(ctx).Warnf("Idempotency key '%s' already existed in database for transaction %s", resolvedTxn.IdempotencyKey, resolvedTxn.ID) + expectedEntry.Output.IdempotencyError = &sqlcommon.IdempotencyError{ + ExistingTXID: resolvedTxn.ID, + OriginalError: i18n.NewError(ctx, coremsgs.MsgIdempotencyKeyDuplicateTransaction, resolvedTxn.IdempotencyKey, resolvedTxn.ID), + } + } + } + } + } + + // Insert events for all transactions that did not have an idempotency key failure + // Note event insertion is already optimized within the database layer. + for _, entry := range batch { + if entry.Output.IdempotencyError == nil { + tx := entry.Output.Transaction + if err := t.database.InsertEvent(ctx, core.NewEvent(core.EventTypeTransactionSubmitted, tx.Namespace, tx.ID, tx.ID, tx.Type.String())); err != nil { + return err + } + t.updateTransactionsCache(tx) + } + } + + // Ok - we're done + return nil +} + // PersistTransaction is called when we need to ensure a transaction exists in the DB, and optionally associate a new BlockchainTXID to it func (t *transactionHelper) PersistTransaction(ctx context.Context, id *fftypes.UUID, txType core.TransactionType, blockchainTXID string) (valid bool, err error) { @@ -215,6 +326,54 @@ func (t *transactionHelper) InsertOrGetBlockchainEvent(ctx context.Context, even return nil, nil } +func (t *transactionHelper) InsertNewBlockchainEvents(ctx context.Context, events []*core.BlockchainEvent) (inserted []*core.BlockchainEvent, err error) { + // First we try and insert the whole bundle using batch insert + err = t.database.InsertBlockchainEvents(ctx, events, func() { + for _, event := range events { + t.addBlockchainEventToCache(event) + } + }) + if err == nil { + // happy path worked - all new events + return events, nil + } + + // Fall back to insert-or-get + log.L(ctx).Warnf("Blockchain event insert-many optimization failed: %s", err) + inserted = make([]*core.BlockchainEvent, 0, len(events)) + for _, event := range events { + existing, err := t.database.InsertOrGetBlockchainEvent(ctx, event) + if err != nil { + return nil, err + } + + if existing != nil { + // It's possible the batch insert was partially successful, and this is actually a "new" row. + // Look to see if the corresponding entry also exists in the "events" table. 
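+			// If no notification event references it, our batch insert must have created the row
+			// (partial success), so report it to the caller as new; otherwise it is a true duplicate.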
+ fb := database.EventQueryFactory.NewFilter(ctx) + notifications, _, err := t.database.GetEvents(ctx, t.namespace, fb.And( + fb.Eq("type", core.EventTypeBlockchainEventReceived), + fb.Eq("reference", existing.ID), + )) + if err != nil { + return nil, err + } + if len(notifications) == 0 { + log.L(ctx).Debugf("Detected partial success from batch insert on blockchain event %s", existing.ProtocolID) + inserted = append(inserted, existing) // notify caller that this is actually a new row + } else { + log.L(ctx).Debugf("Ignoring duplicate blockchain event %s", existing.ProtocolID) + } + t.addBlockchainEventToCache(existing) + } else { + inserted = append(inserted, event) + t.addBlockchainEventToCache(event) + } + } + + return inserted, nil +} + func (t *transactionHelper) FindOperationInTransaction(ctx context.Context, tx *fftypes.UUID, opType core.OpType) (*core.Operation, error) { fb := database.OperationQueryFactory.NewFilter(ctx) filter := fb.And( diff --git a/internal/txcommon/txcommon_test.go b/internal/txcommon/txcommon_test.go index c6eaaf7ff5..903b0081a1 100644 --- a/internal/txcommon/txcommon_test.go +++ b/internal/txcommon/txcommon_test.go @@ -28,7 +28,6 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly/internal/cache" "github.com/hyperledger/firefly/internal/coreconfig" - "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/mocks/cachemocks" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" @@ -38,15 +37,33 @@ import ( "github.com/stretchr/testify/mock" ) -func NewTestTransactionHelper(di database.Plugin, dm data.Manager) (Helper, cache.CInterface, cache.CInterface) { - t := &transactionHelper{ +type testTransactionHelper struct { + transactionHelper + + mdi *databasemocks.Plugin + mdm *datamocks.Manager +} + +func (tth *testTransactionHelper) cleanup(t *testing.T) { + tth.mdi.AssertExpectations(t) + tth.mdm.AssertExpectations(t) +} + +func NewTestTransactionHelper() (*testTransactionHelper, cache.CInterface, cache.CInterface) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + t := transactionHelper{ namespace: "ns1", - database: di, - data: dm, + database: mdi, + data: mdm, } t.transactionCache = cache.NewUmanagedCache(context.Background(), config.GetByteSize(coreconfig.CacheTransactionSize), config.GetDuration(coreconfig.CacheTransactionTTL)) t.blockchainEventCache = cache.NewUmanagedCache(context.Background(), config.GetByteSize(coreconfig.CacheBlockchainEventLimit), config.GetDuration(coreconfig.CacheBlockchainEventTTL)) - return t, t.transactionCache, t.blockchainEventCache + return &testTransactionHelper{ + transactionHelper: t, + mdi: mdi, + mdm: mdm, + }, t.transactionCache, t.blockchainEventCache } func TestSubmitNewTransactionOK(t *testing.T) { @@ -205,7 +222,7 @@ func TestPersistTransactionExistingAddBlockchainID(t *testing.T) { mdm := &datamocks.Manager{} ctx := context.Background() cmi := &cachemocks.Manager{} - cache := cache.NewUmanagedCache(ctx, 100, 5*time.Minute) + cache := cache.NewUmanagedCache(ctx, 1024, 5*time.Minute) cmi.On("GetCache", mock.Anything).Return(cache, nil) txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) @@ -449,13 +466,12 @@ func TestAddBlockchainTXUnchanged(t *testing.T) { func TestGetTransactionByIDCached(t *testing.T) { - mdi := &databasemocks.Plugin{} - mdm := &datamocks.Manager{} - txHelper, _, _ := NewTestTransactionHelper(mdi, mdm) + txHelper, _, _ := NewTestTransactionHelper() + defer 
txHelper.cleanup(t) ctx := context.Background() txid := fftypes.NewUUID() - mdi.On("GetTransactionByID", ctx, "ns1", txid).Return(&core.Transaction{ + txHelper.mdi.On("GetTransactionByID", ctx, "ns1", txid).Return(&core.Transaction{ ID: txid, Namespace: "ns1", Type: core.TransactionTypeContractInvoke, @@ -472,25 +488,20 @@ func TestGetTransactionByIDCached(t *testing.T) { assert.NoError(t, err) assert.Equal(t, txid, tx.ID) - mdi.AssertExpectations(t) - } func TestGetTransactionByIDCachedFail(t *testing.T) { - mdi := &databasemocks.Plugin{} - mdm := &datamocks.Manager{} - txHelper, _, _ := NewTestTransactionHelper(mdi, mdm) + txHelper, _, _ := NewTestTransactionHelper() + defer txHelper.cleanup(t) ctx := context.Background() txid := fftypes.NewUUID() - mdi.On("GetTransactionByID", ctx, "ns1", txid).Return(nil, fmt.Errorf("pop")) + txHelper.mdi.On("GetTransactionByID", ctx, "ns1", txid).Return(nil, fmt.Errorf("pop")) _, err := txHelper.GetTransactionByIDCached(ctx, txid) assert.EqualError(t, err, "pop") - mdi.AssertExpectations(t) - } func TestGetBlockchainEventByIDCached(t *testing.T) { @@ -567,7 +578,6 @@ func TestInsertGetBlockchainEventCached(t *testing.T) { cmi := &cachemocks.Manager{} cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) - evID := fftypes.NewUUID() chainEvent := &core.BlockchainEvent{ ID: evID, @@ -581,12 +591,11 @@ func TestInsertGetBlockchainEventCached(t *testing.T) { cached, err := txHelper.GetBlockchainEventByIDCached(ctx, evID) assert.NoError(t, err) assert.Equal(t, chainEvent, cached) - mdi.AssertExpectations(t) } -func TestInsertBlockchainEventDuplicate(t *testing.T) { +func TestInsertGetBlockchainEventDuplicate(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} @@ -594,7 +603,6 @@ func TestInsertBlockchainEventDuplicate(t *testing.T) { cmi := &cachemocks.Manager{} cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) - evID := fftypes.NewUUID() chainEvent := &core.BlockchainEvent{ ID: evID, @@ -611,6 +619,152 @@ func TestInsertBlockchainEventDuplicate(t *testing.T) { } +func TestInsertGetBlockchainEventErr(t *testing.T) { + + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + evID := fftypes.NewUUID() + chainEvent := &core.BlockchainEvent{ + ID: evID, + Namespace: "ns1", + } + mdi.On("InsertOrGetBlockchainEvent", ctx, chainEvent).Return(nil, fmt.Errorf("pop")) + + _, err := txHelper.InsertOrGetBlockchainEvent(ctx, chainEvent) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) + +} + +func TestInsertNewBlockchainEventsOptimized(t *testing.T) { + + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + evID := fftypes.NewUUID() + chainEvent := &core.BlockchainEvent{ + ID: evID, + Namespace: "ns1", + } + mdi.On("InsertBlockchainEvents", ctx, []*core.BlockchainEvent{chainEvent}, mock.Anything). 
+ Run(func(args mock.Arguments) { + cb := args[2].(database.PostCompletionHook) + cb() + }). + Return(nil) + + _, err := txHelper.InsertNewBlockchainEvents(ctx, []*core.BlockchainEvent{chainEvent}) + assert.NoError(t, err) + + cached, err := txHelper.GetBlockchainEventByIDCached(ctx, evID) + assert.NoError(t, err) + assert.Equal(t, chainEvent, cached) + + mdi.AssertExpectations(t) + +} + +func TestInsertNewBlockchainEventsEventCached(t *testing.T) { + + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + evID := fftypes.NewUUID() + chainEvent := &core.BlockchainEvent{ + ID: evID, + Namespace: "ns1", + } + mdi.On("InsertBlockchainEvents", ctx, []*core.BlockchainEvent{chainEvent}, mock.Anything).Return(fmt.Errorf("optimization bypass")) + mdi.On("InsertOrGetBlockchainEvent", ctx, chainEvent).Return(nil, nil) + + _, err := txHelper.InsertNewBlockchainEvents(ctx, []*core.BlockchainEvent{chainEvent}) + assert.NoError(t, err) + + cached, err := txHelper.GetBlockchainEventByIDCached(ctx, evID) + assert.NoError(t, err) + assert.Equal(t, chainEvent, cached) + + mdi.AssertExpectations(t) + +} + +func TestInsertBlockchainEventDuplicate(t *testing.T) { + + txHelper, _, _ := NewTestTransactionHelper() + defer txHelper.cleanup(t) + ctx := context.Background() + + evID := fftypes.NewUUID() + chainEvent := &core.BlockchainEvent{ + ID: evID, + Namespace: "ns1", + } + existingEvent := &core.BlockchainEvent{} + txHelper.mdi.On("InsertBlockchainEvents", ctx, []*core.BlockchainEvent{chainEvent}, mock.Anything).Return(fmt.Errorf("optimization bypass")) + txHelper.mdi.On("InsertOrGetBlockchainEvent", ctx, chainEvent).Return(existingEvent, nil) + txHelper.mdi.On("GetEvents", ctx, "ns1", mock.Anything).Return([]*core.Event{{}}, nil, nil) + + result, err := txHelper.InsertNewBlockchainEvents(ctx, []*core.BlockchainEvent{chainEvent}) + assert.NoError(t, err) + assert.Empty(t, result) + +} + +func TestInsertBlockchainEventFailEventQuery(t *testing.T) { + + txHelper, _, _ := NewTestTransactionHelper() + defer txHelper.cleanup(t) + ctx := context.Background() + + chainEvent := &core.BlockchainEvent{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + } + existingEvent := &core.BlockchainEvent{} + txHelper.mdi.On("InsertBlockchainEvents", ctx, []*core.BlockchainEvent{chainEvent}, mock.Anything).Return(fmt.Errorf("optimization bypass")) + txHelper.mdi.On("InsertOrGetBlockchainEvent", ctx, chainEvent).Return(existingEvent, nil) + txHelper.mdi.On("GetEvents", ctx, "ns1", mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + + _, err := txHelper.InsertNewBlockchainEvents(ctx, []*core.BlockchainEvent{chainEvent}) + assert.EqualError(t, err, "pop") + +} + +func TestInsertBlockchainEventPartialBatch(t *testing.T) { + + txHelper, _, _ := NewTestTransactionHelper() + defer txHelper.cleanup(t) + ctx := context.Background() + + chainEvent := &core.BlockchainEvent{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + } + existingEvent := &core.BlockchainEvent{} + txHelper.mdi.On("InsertBlockchainEvents", ctx, []*core.BlockchainEvent{chainEvent}, mock.Anything).Return(fmt.Errorf("optimization bypass")) + txHelper.mdi.On("InsertOrGetBlockchainEvent", ctx, chainEvent).Return(existingEvent, nil) + txHelper.mdi.On("GetEvents", ctx, "ns1", mock.Anything).Return([]*core.Event{}, nil, nil) + + result, err := 
txHelper.InsertNewBlockchainEvents(ctx, []*core.BlockchainEvent{chainEvent}) + assert.NoError(t, err) + assert.Len(t, result, 1) + assert.Equal(t, existingEvent, result[0]) + +} + func TestInsertBlockchainEventErr(t *testing.T) { mdi := &databasemocks.Plugin{} @@ -625,9 +779,10 @@ func TestInsertBlockchainEventErr(t *testing.T) { ID: evID, Namespace: "ns1", } + mdi.On("InsertBlockchainEvents", ctx, []*core.BlockchainEvent{chainEvent}, mock.Anything).Return(fmt.Errorf("optimization bypass")) mdi.On("InsertOrGetBlockchainEvent", ctx, chainEvent).Return(nil, fmt.Errorf("pop")) - _, err := txHelper.InsertOrGetBlockchainEvent(ctx, chainEvent) + _, err := txHelper.InsertNewBlockchainEvents(ctx, []*core.BlockchainEvent{chainEvent}) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -673,3 +828,258 @@ func TestFindOperationInTransactionFail(t *testing.T) { mdi.AssertExpectations(t) } + +func TestSubmitNewTransactionBatchAllPlainOk(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + batch := make([]*BatchedTransactionInsert, 5) + for i := 0; i < len(batch); i++ { + batch[i] = &BatchedTransactionInsert{ + Input: TransactionInsertInput{ + Type: core.BatchTypePrivate, + IdempotencyKey: "", // this makes them all plain + }, + } + } + mdi.On("InsertTransactions", ctx, mock.MatchedBy(func(transactions []*core.Transaction) bool { + return len(transactions) == len(batch) + })).Return(nil).Once() + mdi.On("InsertEvent", ctx, mock.MatchedBy(func(e *core.Event) bool { + return e.Type == core.EventTypeTransactionSubmitted + })).Return(nil).Times(len(batch)) + + err := txHelper.SubmitNewTransactionBatch(ctx, "ns1", batch) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestSubmitNewTransactionBatchAllPlainFail(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + batch := make([]*BatchedTransactionInsert, 5) + for i := 0; i < len(batch); i++ { + batch[i] = &BatchedTransactionInsert{ + Input: TransactionInsertInput{ + Type: core.BatchTypePrivate, + IdempotencyKey: "", // this makes them all plain + }, + } + } + mdi.On("InsertTransactions", ctx, mock.MatchedBy(func(transactions []*core.Transaction) bool { + return len(transactions) == len(batch) + })).Return(fmt.Errorf("pop")).Once() + + err := txHelper.SubmitNewTransactionBatch(ctx, "ns1", batch) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) +} + +func TestSubmitNewTransactionBatchMixSucceedOptimized(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + batch := make([]*BatchedTransactionInsert, 6) + for i := 0; i < len(batch); i++ { + batch[i] = &BatchedTransactionInsert{ + Input: TransactionInsertInput{ + Type: core.BatchTypePrivate, + }, + } + if i%2 == 0 { + batch[i].Input.IdempotencyKey = core.IdempotencyKey(fmt.Sprintf("idem_%.3d", i)) + } + } + mdi.On("InsertTransactions", 
ctx, mock.MatchedBy(func(transactions []*core.Transaction) bool { + return len(transactions) == len(batch)/2 + })).Return(nil).Twice() // once for non-idempotent, once for idempotent + mdi.On("InsertEvent", ctx, mock.MatchedBy(func(e *core.Event) bool { + return e.Type == core.EventTypeTransactionSubmitted + })).Return(nil).Times(len(batch)) + + err := txHelper.SubmitNewTransactionBatch(ctx, "ns1", batch) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestSubmitNewTransactionBatchAllIdempotentDup(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + batch := make([]*BatchedTransactionInsert, 3) + for i := 0; i < len(batch); i++ { + batch[i] = &BatchedTransactionInsert{ + Input: TransactionInsertInput{ + Type: core.BatchTypePrivate, + IdempotencyKey: core.IdempotencyKey(fmt.Sprintf("idem_%.3d", i)), + }, + } + } + mdi.On("InsertTransactions", ctx, mock.MatchedBy(func(transactions []*core.Transaction) bool { + return len(transactions) == len(batch) + })).Return(fmt.Errorf("go check for dups")).Once() + mdi.On("GetTransactions", ctx, "ns1", mock.Anything).Return( + []*core.Transaction{ + {ID: fftypes.NewUUID(), IdempotencyKey: "idem_002"}, + {ID: fftypes.NewUUID(), IdempotencyKey: "idem_000"}, + {ID: fftypes.NewUUID(), IdempotencyKey: "idem_001"}, + }, + nil, nil, + ).Once() + + err := txHelper.SubmitNewTransactionBatch(ctx, "ns1", batch) + assert.NoError(t, err) + + for i := 0; i < len(batch); i++ { + assert.Regexp(t, "FF10431.*"+batch[i].Input.IdempotencyKey, batch[i].Output.IdempotencyError) + } + + mdi.AssertExpectations(t) +} + +func TestSubmitNewTransactionBatchQueryFailForIdempotencyCheck(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + batch := make([]*BatchedTransactionInsert, 3) + for i := 0; i < len(batch); i++ { + batch[i] = &BatchedTransactionInsert{ + Input: TransactionInsertInput{ + Type: core.BatchTypePrivate, + IdempotencyKey: core.IdempotencyKey(fmt.Sprintf("idem_%.3d", i)), + }, + } + } + mdi.On("InsertTransactions", ctx, mock.MatchedBy(func(transactions []*core.Transaction) bool { + return len(transactions) == len(batch) + })).Return(fmt.Errorf("fallback to throw this err")).Once() + mdi.On("GetTransactions", ctx, "ns1", mock.Anything).Return(nil, nil, fmt.Errorf("do not throw this error")).Once() + + err := txHelper.SubmitNewTransactionBatch(ctx, "ns1", batch) + assert.Regexp(t, "fallback to throw this err", err) + + mdi.AssertExpectations(t) +} + +func TestSubmitNewTransactionBatchFindWrongNumberOfRecords(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + batch := make([]*BatchedTransactionInsert, 3) + for i := 0; i < len(batch); i++ { + batch[i] = &BatchedTransactionInsert{ + Input: TransactionInsertInput{ + Type: core.BatchTypePrivate, + IdempotencyKey: core.IdempotencyKey(fmt.Sprintf("idem_%.3d", 
i)), + }, + } + } + mdi.On("InsertTransactions", ctx, mock.MatchedBy(func(transactions []*core.Transaction) bool { + return len(transactions) == len(batch) + })).Return(fmt.Errorf("fallback to throw this err")).Once() + mdi.On("GetTransactions", ctx, "ns1", mock.Anything).Return( + []*core.Transaction{ + {ID: fftypes.NewUUID(), IdempotencyKey: "idem_002"}, // only one came back + }, + nil, nil, + ).Once() + + err := txHelper.SubmitNewTransactionBatch(ctx, "ns1", batch) + assert.Regexp(t, "fallback to throw this err", err) + + mdi.AssertExpectations(t) +} + +func TestSubmitNewTransactionBatchIdempotentDupInBatch(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + batch := make([]*BatchedTransactionInsert, 3) + for i := 0; i < len(batch); i++ { + batch[i] = &BatchedTransactionInsert{ + Input: TransactionInsertInput{ + Type: core.BatchTypePrivate, + IdempotencyKey: "duplicated_in_all", + }, + } + } + mdi.On("InsertTransactions", ctx, mock.MatchedBy(func(transactions []*core.Transaction) bool { + return len(transactions) == 1 + })).Return(nil).Once() + mdi.On("InsertEvent", ctx, mock.MatchedBy(func(e *core.Event) bool { + return e.Type == core.EventTypeTransactionSubmitted + })).Return(nil).Once() + + err := txHelper.SubmitNewTransactionBatch(ctx, "ns1", batch) + assert.NoError(t, err) + + for i := 0; i < len(batch); i++ { + if i == 0 { + assert.NoError(t, batch[i].Output.IdempotencyError) + } else { + assert.Regexp(t, "FF10431.*duplicated_in_all", batch[i].Output.IdempotencyError) + } + } + + mdi.AssertExpectations(t) +} + +func TestSubmitNewTransactionBatchInsertEventFail(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + ctx := context.Background() + cmi := &cachemocks.Manager{} + cmi.On("GetCache", mock.Anything).Return(cache.NewUmanagedCache(ctx, 100, 5*time.Minute), nil) + txHelper, _ := NewTransactionHelper(ctx, "ns1", mdi, mdm, cmi) + + batch := []*BatchedTransactionInsert{ + { + Input: TransactionInsertInput{ + Type: core.BatchTypePrivate, + }, + }, + } + mdi.On("InsertTransactions", ctx, mock.MatchedBy(func(transactions []*core.Transaction) bool { + return len(transactions) == 1 + })).Return(nil).Once() + mdi.On("InsertEvent", ctx, mock.MatchedBy(func(e *core.Event) bool { + return e.Type == core.EventTypeTransactionSubmitted + })).Return(fmt.Errorf("pop")).Once() + + err := txHelper.SubmitNewTransactionBatch(ctx, "ns1", batch) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) +} diff --git a/internal/txwriter/txwriter.go b/internal/txwriter/txwriter.go new file mode 100644 index 0000000000..4b68d02bc3 --- /dev/null +++ b/internal/txwriter/txwriter.go @@ -0,0 +1,242 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package txwriter + +import ( + "context" + "fmt" + "time" + + "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/i18n" + "github.com/hyperledger/firefly-common/pkg/log" + "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/internal/coremsgs" + "github.com/hyperledger/firefly/internal/operations" + "github.com/hyperledger/firefly/internal/txcommon" + "github.com/hyperledger/firefly/pkg/core" + "github.com/hyperledger/firefly/pkg/database" +) + +type Writer interface { + Start() + WriteTransactionAndOps(ctx context.Context, txType core.TransactionType, idempotencyKey core.IdempotencyKey, operations ...*core.Operation) (*core.Transaction, error) + Close() +} + +// request is the dispatched fully validated blockchain transaction, with the previously +// resolved key, and validated signature & inputs ready to generate the operations +// for the blockchain connector. +// Could be a contract deployment, or a transaction invoke. +type request struct { + txType core.TransactionType + idempotencyKey core.IdempotencyKey + operations []*core.Operation + result chan *result +} + +type result struct { + transaction *core.Transaction + err error +} + +type txWriterBatch struct { + id string + requests []*request + timeoutContext context.Context + timeoutCancel func() +} + +// txWriter manages writing blockchain transactions and operations to the database. +// Does so with optimized multi-insert database operations, on a pool of routines +// that can manage many concurrent API requests efficiently against the DB. +type txWriter struct { + bgContext context.Context + cancelFunc func() + database database.Plugin + txHelper txcommon.Helper + operations operations.Manager + workQueue chan *request + workersDone []chan struct{} + closed bool + // Config + namespace string + workerCount int + batchTimeout time.Duration + batchMax int +} + +func NewTransactionWriter(ctx context.Context, ns string, di database.Plugin, txHelper txcommon.Helper, operations operations.Manager) Writer { + + workerCount := config.GetInt(coreconfig.TransactionWriterCount) + if !di.Capabilities().Concurrency { + log.L(ctx).Infof("Database plugin not configured for concurrency. 
Batched transaction writing disabled") + workerCount = 0 + } + tw := &txWriter{ + namespace: ns, + workerCount: workerCount, + batchTimeout: config.GetDuration(coreconfig.TransactionWriterBatchTimeout), + batchMax: config.GetInt(coreconfig.TransactionWriterBatchMaxTransactions), + + database: di, + txHelper: txHelper, + operations: operations, + } + tw.bgContext, tw.cancelFunc = context.WithCancel(ctx) + return tw +} + +func (tw *txWriter) WriteTransactionAndOps(ctx context.Context, txType core.TransactionType, idempotencyKey core.IdempotencyKey, operations ...*core.Operation) (*core.Transaction, error) { + req := &request{ + txType: txType, + idempotencyKey: idempotencyKey, + operations: operations, + result: make(chan *result, 1), // allocate a slot for the result to avoid blocking + } + if tw.workerCount == 0 { + // Workers disabled, execute in-line on the current context + // (note we provide a slot in the channel to ensure this doesn't block) + tw.executeBatch(ctx, &txWriterBatch{requests: []*request{req}}) + } else { + // Dispatch to background worker pool + select { + case tw.workQueue <- req: + case <-ctx.Done(): // caller context is cancelled before dispatch + return nil, i18n.NewError(ctx, coremsgs.MsgContextCanceled) + case <-tw.bgContext.Done(): // background context is cancelled before dispatch + return nil, i18n.NewError(ctx, coremsgs.MsgContextCanceled) + } + } + res := <-req.result + return res.transaction, res.err +} + +func (tw *txWriter) Start() { + if tw.workerCount > 0 { + tw.workQueue = make(chan *request) + tw.workersDone = make([]chan struct{}, tw.workerCount) + for i := 0; i < tw.workerCount; i++ { + tw.workersDone[i] = make(chan struct{}) + go tw.writerLoop(i) + } + } +} + +func (tw *txWriter) writerLoop(writerIndex int) { + defer close(tw.workersDone[writerIndex]) + + ctx := log.WithLogField(tw.bgContext, "job", fmt.Sprintf("txwriter_%.3d", writerIndex)) + var batchNumber int + var batch *txWriterBatch + for !tw.closed { + var timeoutContext context.Context + var timedOut bool + if batch != nil { + timeoutContext = batch.timeoutContext + } else { + timeoutContext = ctx + } + select { + case work := <-tw.workQueue: + if batch == nil { + batchNumber++ + batch = &txWriterBatch{id: fmt.Sprintf("txw_%.3d_%.10d", writerIndex, batchNumber)} + batch.timeoutContext, batch.timeoutCancel = context.WithTimeout(ctx, tw.batchTimeout) + } + batch.requests = append(batch.requests, work) + case <-timeoutContext.Done(): + timedOut = true + } + + if batch != nil && (timedOut || (len(batch.requests) >= tw.batchMax)) { + batch.timeoutCancel() + + tw.executeBatch(ctx, batch) + batch = nil + } + } +} + +func (tw *txWriter) executeBatch(ctx context.Context, batch *txWriterBatch) { + ctx = log.WithLogField(ctx, "batch", batch.id) + err := tw.database.RunAsGroup(ctx, func(ctx context.Context) error { + return tw.processBatch(ctx, batch) + }) + if err != nil { + for _, req := range batch.requests { + req.result <- &result{err: err} + } + } +} + +func (tw *txWriter) processBatch(ctx context.Context, batch *txWriterBatch) error { + // First we try to insert all the transactions + txInserts := make([]*txcommon.BatchedTransactionInsert, len(batch.requests)) + for i, req := range batch.requests { + txInserts[i] = &txcommon.BatchedTransactionInsert{ + Input: txcommon.TransactionInsertInput{ + Type: req.txType, + IdempotencyKey: req.idempotencyKey, + }, + } + } + if err := tw.txHelper.SubmitNewTransactionBatch(ctx, tw.namespace, txInserts); err != nil { + return err + } + // Then we work out the 
actual number of new ones + results := make([]*result, len(batch.requests)) + operations := make([]*core.Operation, 0, len(batch.requests)) + for i, insertResult := range txInserts { + req := batch.requests[i] + if insertResult.Output.IdempotencyError != nil { + results[i] = &result{err: insertResult.Output.IdempotencyError} + } else { + txn := insertResult.Output.Transaction + results[i] = &result{transaction: txn} + // Set the transaction ID on all ops, and add to list for insertion + for _, op := range req.operations { + op.Transaction = txn.ID + operations = append(operations, op) + } + } + } + + // Insert all the operations - these must be unique, as this is a brand new transaction + if len(operations) > 0 { + err := tw.operations.BulkInsertOperations(ctx, operations...) + if err != nil { + return err + } + } + + // Ok we're done. We're assured not to return an err, so we can dispatch the results to each + for i, res := range results { + batch.requests[i].result <- res + } + return nil +} + +func (tw *txWriter) Close() { + if !tw.closed { + tw.closed = true + tw.cancelFunc() + for _, workerDone := range tw.workersDone { + <-workerDone + } + } +} diff --git a/internal/txwriter/txwriter_test.go b/internal/txwriter/txwriter_test.go new file mode 100644 index 0000000000..80fc41a674 --- /dev/null +++ b/internal/txwriter/txwriter_test.go @@ -0,0 +1,250 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package txwriter + +import ( + "context" + "fmt" + "testing" + + "github.com/hyperledger/firefly-common/pkg/config" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/internal/cache" + "github.com/hyperledger/firefly/internal/coreconfig" + "github.com/hyperledger/firefly/internal/database/sqlcommon" + "github.com/hyperledger/firefly/internal/operations" + "github.com/hyperledger/firefly/internal/txcommon" + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" + "github.com/hyperledger/firefly/pkg/core" + "github.com/hyperledger/firefly/pkg/database" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func newTestTransactionWriter(t *testing.T, dbCaps *database.Capabilities, mods ...func()) (context.Context, *txWriter, func()) { + ctx, cancelCtx := context.WithCancel(context.Background()) + + coreconfig.Reset() + config.Set(coreconfig.TransactionWriterCount, 1) + mdi := &databasemocks.Plugin{} + mdi.On("Capabilities").Return(dbCaps) + mrag := mdi.On("RunAsGroup", mock.Anything, mock.Anything) + mrag.Run(func(args mock.Arguments) { + ctx := args[0].(context.Context) + fn := args[1].(func(context.Context) error) + mrag.Return(fn(ctx)) + }).Maybe() + mdm := &datamocks.Manager{} + cm := cache.NewCacheManager(ctx) + for _, mod := range mods { + mod() + } + + txh, err := txcommon.NewTransactionHelper(ctx, "ns1", mdi, mdm, cm) + assert.NoError(t, err) + ops, err := operations.NewOperationsManager(ctx, "ns1", mdi, txh, cm) + assert.NoError(t, err) + txw := NewTransactionWriter(ctx, "ns1", mdi, txh, ops).(*txWriter) + return ctx, txw, func() { + cancelCtx() + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + txw.Close() + } +} + +func TestWorkerCountForcedToZeroIfNoDBConcurrency(t *testing.T) { + _, txw, done := newTestTransactionWriter(t, &database.Capabilities{Concurrency: false}) + defer done() + assert.Zero(t, txw.workerCount) + txw.Start() // no op +} + +func TestWriteNewTransactionClosed(t *testing.T) { + _, txw, done := newTestTransactionWriter(t, &database.Capabilities{Concurrency: true}) + done() + // Write under background context, but the write context is closed + _, err := txw.WriteTransactionAndOps(context.Background(), core.TransactionTypeContractInvoke, "") + assert.Regexp(t, "FF00154", err) +} + +func TestWriteNewTransactionInputContextClosed(t *testing.T) { + _, txw, done := newTestTransactionWriter(t, &database.Capabilities{Concurrency: true}) + defer done() + // Write under background context, but the write context is closed + cancelledCtx, cancelContext := context.WithCancel(context.Background()) + cancelContext() + _, err := txw.WriteTransactionAndOps(cancelledCtx, core.TransactionTypeContractInvoke, "") + assert.Regexp(t, "FF00154", err) +} + +func TestBatchOfOneSequentialSuccess(t *testing.T) { + ctx, txw, done := newTestTransactionWriter(t, &database.Capabilities{ + Concurrency: false, // will run inline + }) + defer done() + + inputOpID := fftypes.NewUUID() + mdi := txw.database.(*databasemocks.Plugin) + mdi.On("InsertTransactions", mock.Anything, mock.MatchedBy(func(txns []*core.Transaction) bool { + return len(txns) == 1 + })).Return(nil) + mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + return event.Type.Equals(core.EventTypeTransactionSubmitted) + })).Return(nil) + mdi.On("InsertOperations", mock.Anything, mock.MatchedBy(func(ops []*core.Operation) bool { + return len(ops) == 1 && ops[0].ID.Equals(inputOpID) + 
})).Return(nil) + + tx, err := txw.WriteTransactionAndOps(ctx, core.TransactionTypeContractInvoke, "", &core.Operation{ + ID: inputOpID, + }) + assert.NoError(t, err) + assert.NotNil(t, tx) + assert.NotNil(t, tx.ID) // generated for us + +} + +func TestBatchOfOneAsyncSuccess(t *testing.T) { + ctx, txw, done := newTestTransactionWriter(t, &database.Capabilities{ + Concurrency: true, + }) + defer done() + txw.Start() + + inputOpID := fftypes.NewUUID() + mdi := txw.database.(*databasemocks.Plugin) + mdi.On("InsertTransactions", mock.Anything, mock.MatchedBy(func(txns []*core.Transaction) bool { + return len(txns) == 1 + })).Return(nil) + mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + return event.Type.Equals(core.EventTypeTransactionSubmitted) + })).Return(nil) + mdi.On("InsertOperations", mock.Anything, mock.MatchedBy(func(ops []*core.Operation) bool { + return len(ops) == 1 && ops[0].ID.Equals(inputOpID) + })).Return(nil) + + op := &core.Operation{ + ID: inputOpID, + } + tx, err := txw.WriteTransactionAndOps(ctx, core.TransactionTypeContractInvoke, "", op) + assert.NoError(t, err) + assert.NotNil(t, tx) + assert.NotNil(t, tx.ID) // generated for us + assert.Equal(t, op.Transaction, tx.ID) // assigned for us + +} + +func TestBatchOfOneInsertOpFail(t *testing.T) { + ctx, txw, done := newTestTransactionWriter(t, &database.Capabilities{ + Concurrency: false, // will run inline + }) + defer done() + + inputOpID := fftypes.NewUUID() + mdi := txw.database.(*databasemocks.Plugin) + mdi.On("InsertTransactions", mock.Anything, mock.MatchedBy(func(txns []*core.Transaction) bool { + return len(txns) == 1 + })).Return(nil) + mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + return event.Type.Equals(core.EventTypeTransactionSubmitted) + })).Return(nil) + mdi.On("InsertOperations", mock.Anything, mock.MatchedBy(func(ops []*core.Operation) bool { + return len(ops) == 1 && ops[0].ID.Equals(inputOpID) + })).Return(fmt.Errorf("pop")) + + _, err := txw.WriteTransactionAndOps(ctx, core.TransactionTypeContractInvoke, "", &core.Operation{ + ID: inputOpID, + }) + assert.Regexp(t, "pop", err) + +} + +func TestInsertTxNonIdempotentFail(t *testing.T) { + ctx, txw, done := newTestTransactionWriter(t, &database.Capabilities{ + Concurrency: false, // will run inline + }) + defer done() + + inputOpID := fftypes.NewUUID() + mdi := txw.database.(*databasemocks.Plugin) + mdi.On("InsertTransactions", mock.Anything, mock.MatchedBy(func(txns []*core.Transaction) bool { + return len(txns) == 1 + })).Return(fmt.Errorf("pop")) + + _, err := txw.WriteTransactionAndOps(ctx, core.TransactionTypeContractInvoke, "", &core.Operation{ + ID: inputOpID, + }) + assert.Regexp(t, "pop", err) + +} + +func TestMixedIdempotencyResult(t *testing.T) { + ctx, txw, done := newTestTransactionWriter(t, &database.Capabilities{ + Concurrency: false, // will run inline + }) + defer done() + + mdi := txw.database.(*databasemocks.Plugin) + var firstTXID *fftypes.UUID + existingTXID := fftypes.NewUUID() + mdi.On("InsertTransactions", mock.Anything, mock.MatchedBy(func(txns []*core.Transaction) bool { + return len(txns) == 2 + })).Run(func(args mock.Arguments) { + txns := args[1].([]*core.Transaction) + firstTXID = txns[0].ID // capture this to provide a mixed result + }).Return(fmt.Errorf("mixed result")) + mockGet := mdi.On("GetTransactions", mock.Anything, "ns1", mock.Anything) + mockGet.Run(func(args mock.Arguments) { + mockGet.Return( + []*core.Transaction{ + {ID: firstTXID, 
IdempotencyKey: "idem1"}, + {ID: existingTXID /* existing */, IdempotencyKey: "idem2"}, + }, + nil, nil) + }) + mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(event *core.Event) bool { + return event.Type.Equals(core.EventTypeTransactionSubmitted) + })).Return(nil).Once() + + done1 := make(chan *result, 1) + done2 := make(chan *result, 1) + txw.executeBatch(ctx, &txWriterBatch{ + requests: []*request{ + { + txType: core.TransactionTypeContractInvoke, + idempotencyKey: "idem1", + result: done1, + }, + { + txType: core.TransactionTypeContractInvoke, + idempotencyKey: "idem2", + result: done2, + }, + }, + }) + res1 := <-done1 + res2 := <-done2 + assert.NoError(t, res1.err) + assert.Equal(t, firstTXID, res1.transaction.ID) + assert.Regexp(t, "FF10431.*idem2", res2.err) + idemErr, ok := res2.err.(*sqlcommon.IdempotencyError) + assert.True(t, ok) + assert.Equal(t, existingTXID, idemErr.ExistingTXID) + +} diff --git a/manifest.json b/manifest.json index d5c867625a..bebcb9421b 100644 --- a/manifest.json +++ b/manifest.json @@ -1,19 +1,24 @@ { "ethconnect": { "image": "ghcr.io/hyperledger/firefly-ethconnect", - "tag": "v3.2.9", - "sha": "a9aed053d57d56532084e8d0229d3a6e8843054006cd82bb48c4800dcccc6449" + "tag": "v3.3.0", + "sha": "fde5083def6bd9f96bac9dacc3ac571d20ef9b8acf1049c1c30684eba1392f35" }, "evmconnect": { "image": "ghcr.io/hyperledger/firefly-evmconnect", - "tag": "v1.2.8", - "sha": "44cac586392a4b9c43f57e9955b53cdc1a9d71f7d12b97f9fe81ef0b92314ad0" + "tag": "v1.3.2", + "sha": "9752a2c8b9c6cbd3f2b7327a309609a6b33ec80973004aed37e193b4e7f4a7c4" }, "fabconnect": { "image": "ghcr.io/hyperledger/firefly-fabconnect", "tag": "v0.9.17", "sha": "7d4aa158f9dff31f200ae7a04f68665f32930f7739156b25a0da80e5353f3245" }, + "tezosconnect": { + "image": "ghcr.io/hyperledger/firefly-tezosconnect", + "tag": "v0.1.0", + "sha": "2c20731765f9203fceca6e28a5fa443e4ce5c3a9eb1de7710ed46f8e24db1c00" + }, "dataexchange-https": { "image": "ghcr.io/hyperledger/firefly-dataexchange-https", "tag": "v1.2.0", @@ -21,32 +26,32 @@ }, "tokens-erc1155": { "image": "ghcr.io/hyperledger/firefly-tokens-erc1155", - "tag": "v1.2.2", - "sha": "3d259bd4b68dad4634704c9a58c20bdf80457e8bc58ee7ded73292e10702aceb" + "tag": "v1.2.4", + "sha": "cd65eab2e5836b52dfed29a517049e21e2bacd534203b191cee7df42544847f7" }, "tokens-erc20-erc721": { "image": "ghcr.io/hyperledger/firefly-tokens-erc20-erc721", - "tag": "v1.2.3", - "sha": "e1c65bdf95bd5cd88102e539c0aa193edeaefc73fd268606c5550e2d1aae6b31" + "tag": "v1.2.6", + "sha": "4e902d1d9f115c4dc608f5d68c4bd9920e118c7eec8c99a2abdb3aa9de7e7368" }, "signer": { "image": "ghcr.io/hyperledger/firefly-signer", - "tag": "v1.1.5", - "sha": "4ee8549d12339f6d4224a277faf143da1749a51a5994e074224c95e3cce64670" + "tag": "v1.1.9", + "sha": "515271ea722e6bf9601524880e7280a1fc052453733338d6ed474f85d6618aa1" }, "build": { "firefly-builder": { - "image": "golang:1.18-alpine3.16" + "image": "golang:1.21-alpine3.19" }, "fabric-builder": { - "image": "golang:1.18-alpine3.16", + "image": "golang:1.21-alpine3.19", "platform": "linux/x86_64" }, "solidity-builder": { "image": "ethereum/solc:0.8.11-alpine" }, "base": { - "image": "alpine:3.16" + "image": "alpine:3.19" } }, "ui": { @@ -56,4 +61,4 @@ "cli": { "tag": "v1.2.1" } -} +} \ No newline at end of file diff --git a/manifestgen.sh b/manifestgen.sh index 5698c54f15..7533c6f415 100755 --- a/manifestgen.sh +++ b/manifestgen.sh @@ -45,6 +45,7 @@ SERVICES=( "ethconnect" "evmconnect" "fabconnect" + "tezosconnect" "dataexchange-https" "tokens-erc1155" "tokens-erc20-erc721" 
diff --git a/mocks/apiservermocks/ffi_swagger_gen.go b/mocks/apiservermocks/ffi_swagger_gen.go index f16781c8b1..fa0cf08386 100644 --- a/mocks/apiservermocks/ffi_swagger_gen.go +++ b/mocks/apiservermocks/ffi_swagger_gen.go @@ -1,16 +1,16 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package apiservermocks import ( context "context" - fftypes "github.com/hyperledger/firefly-common/pkg/fftypes" + ffapi "github.com/hyperledger/firefly-common/pkg/ffapi" core "github.com/hyperledger/firefly/pkg/core" - mock "github.com/stretchr/testify/mock" + fftypes "github.com/hyperledger/firefly-common/pkg/fftypes" - openapi3 "github.com/getkin/kin-openapi/openapi3" + mock "github.com/stretchr/testify/mock" ) // FFISwaggerGen is an autogenerated mock type for the FFISwaggerGen type @@ -18,29 +18,44 @@ type FFISwaggerGen struct { mock.Mock } -// Generate provides a mock function with given fields: ctx, baseURL, api, ffi -func (_m *FFISwaggerGen) Generate(ctx context.Context, baseURL string, api *core.ContractAPI, ffi *fftypes.FFI) *openapi3.T { - ret := _m.Called(ctx, baseURL, api, ffi) +// Build provides a mock function with given fields: ctx, api, ffi +func (_m *FFISwaggerGen) Build(ctx context.Context, api *core.ContractAPI, ffi *fftypes.FFI) (*ffapi.SwaggerGenOptions, []*ffapi.Route) { + ret := _m.Called(ctx, api, ffi) - var r0 *openapi3.T - if rf, ok := ret.Get(0).(func(context.Context, string, *core.ContractAPI, *fftypes.FFI) *openapi3.T); ok { - r0 = rf(ctx, baseURL, api, ffi) + if len(ret) == 0 { + panic("no return value specified for Build") + } + + var r0 *ffapi.SwaggerGenOptions + var r1 []*ffapi.Route + if rf, ok := ret.Get(0).(func(context.Context, *core.ContractAPI, *fftypes.FFI) (*ffapi.SwaggerGenOptions, []*ffapi.Route)); ok { + return rf(ctx, api, ffi) + } + if rf, ok := ret.Get(0).(func(context.Context, *core.ContractAPI, *fftypes.FFI) *ffapi.SwaggerGenOptions); ok { + r0 = rf(ctx, api, ffi) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*openapi3.T) + r0 = ret.Get(0).(*ffapi.SwaggerGenOptions) } } - return r0 -} + if rf, ok := ret.Get(1).(func(context.Context, *core.ContractAPI, *fftypes.FFI) []*ffapi.Route); ok { + r1 = rf(ctx, api, ffi) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]*ffapi.Route) + } + } -type mockConstructorTestingTNewFFISwaggerGen interface { - mock.TestingT - Cleanup(func()) + return r0, r1 } // NewFFISwaggerGen creates a new instance of FFISwaggerGen. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewFFISwaggerGen(t mockConstructorTestingTNewFFISwaggerGen) *FFISwaggerGen { +// The first argument is typically a *testing.T value. +func NewFFISwaggerGen(t interface { + mock.TestingT + Cleanup(func()) +}) *FFISwaggerGen { mock := &FFISwaggerGen{} mock.Mock.Test(t) diff --git a/mocks/apiservermocks/server.go b/mocks/apiservermocks/server.go index 52d79e921b..6b73db0b81 100644 --- a/mocks/apiservermocks/server.go +++ b/mocks/apiservermocks/server.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
package apiservermocks @@ -18,6 +18,10 @@ type Server struct { func (_m *Server) Serve(ctx context.Context, mgr namespace.Manager) error { ret := _m.Called(ctx, mgr) + if len(ret) == 0 { + panic("no return value specified for Serve") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, namespace.Manager) error); ok { r0 = rf(ctx, mgr) @@ -28,13 +32,12 @@ func (_m *Server) Serve(ctx context.Context, mgr namespace.Manager) error { return r0 } -type mockConstructorTestingTNewServer interface { +// NewServer creates a new instance of Server. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewServer(t interface { mock.TestingT Cleanup(func()) -} - -// NewServer creates a new instance of Server. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewServer(t mockConstructorTestingTNewServer) *Server { +}) *Server { mock := &Server{} mock.Mock.Test(t) diff --git a/mocks/assetmocks/manager.go b/mocks/assetmocks/manager.go index c78336c842..262b2ac678 100644 --- a/mocks/assetmocks/manager.go +++ b/mocks/assetmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package assetmocks @@ -24,6 +24,10 @@ type Manager struct { func (_m *Manager) ActivateTokenPool(ctx context.Context, pool *core.TokenPool) error { ret := _m.Called(ctx, pool) + if len(ret) == 0 { + panic("no return value specified for ActivateTokenPool") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool) error); ok { r0 = rf(ctx, pool) @@ -38,6 +42,10 @@ func (_m *Manager) ActivateTokenPool(ctx context.Context, pool *core.TokenPool) func (_m *Manager) BurnTokens(ctx context.Context, transfer *core.TokenTransferInput, waitConfirm bool) (*core.TokenTransfer, error) { ret := _m.Called(ctx, transfer, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for BurnTokens") + } + var r0 *core.TokenTransfer var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenTransferInput, bool) (*core.TokenTransfer, error)); ok { @@ -64,6 +72,10 @@ func (_m *Manager) BurnTokens(ctx context.Context, transfer *core.TokenTransferI func (_m *Manager) CreateTokenPool(ctx context.Context, pool *core.TokenPoolInput, waitConfirm bool) (*core.TokenPool, error) { ret := _m.Called(ctx, pool, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for CreateTokenPool") + } + var r0 *core.TokenPool var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPoolInput, bool) (*core.TokenPool, error)); ok { @@ -86,10 +98,32 @@ func (_m *Manager) CreateTokenPool(ctx context.Context, pool *core.TokenPoolInpu return r0, r1 } +// DeleteTokenPool provides a mock function with given fields: ctx, poolNameOrID +func (_m *Manager) DeleteTokenPool(ctx context.Context, poolNameOrID string) error { + ret := _m.Called(ctx, poolNameOrID) + + if len(ret) == 0 { + panic("no return value specified for DeleteTokenPool") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, poolNameOrID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // GetTokenAccountPools provides a mock function with given fields: ctx, key, filter func (_m *Manager) GetTokenAccountPools(ctx context.Context, key string, filter ffapi.AndFilter) ([]*core.TokenAccountPool, *ffapi.FilterResult, error) { ret := 
_m.Called(ctx, key, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenAccountPools") + } + var r0 []*core.TokenAccountPool var r1 *ffapi.FilterResult var r2 error @@ -125,6 +159,10 @@ func (_m *Manager) GetTokenAccountPools(ctx context.Context, key string, filter func (_m *Manager) GetTokenAccounts(ctx context.Context, filter ffapi.AndFilter) ([]*core.TokenAccount, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenAccounts") + } + var r0 []*core.TokenAccount var r1 *ffapi.FilterResult var r2 error @@ -160,6 +198,10 @@ func (_m *Manager) GetTokenAccounts(ctx context.Context, filter ffapi.AndFilter) func (_m *Manager) GetTokenApprovals(ctx context.Context, filter ffapi.AndFilter) ([]*core.TokenApproval, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenApprovals") + } + var r0 []*core.TokenApproval var r1 *ffapi.FilterResult var r2 error @@ -195,6 +237,10 @@ func (_m *Manager) GetTokenApprovals(ctx context.Context, filter ffapi.AndFilter func (_m *Manager) GetTokenBalances(ctx context.Context, filter ffapi.AndFilter) ([]*core.TokenBalance, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenBalances") + } + var r0 []*core.TokenBalance var r1 *ffapi.FilterResult var r2 error @@ -230,6 +276,10 @@ func (_m *Manager) GetTokenBalances(ctx context.Context, filter ffapi.AndFilter) func (_m *Manager) GetTokenConnectors(ctx context.Context) []*core.TokenConnector { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetTokenConnectors") + } + var r0 []*core.TokenConnector if rf, ok := ret.Get(0).(func(context.Context) []*core.TokenConnector); ok { r0 = rf(ctx) @@ -242,17 +292,51 @@ func (_m *Manager) GetTokenConnectors(ctx context.Context) []*core.TokenConnecto return r0 } -// GetTokenPool provides a mock function with given fields: ctx, connector, poolName -func (_m *Manager) GetTokenPool(ctx context.Context, connector string, poolName string) (*core.TokenPool, error) { - ret := _m.Called(ctx, connector, poolName) +// GetTokenPoolByID provides a mock function with given fields: ctx, id +func (_m *Manager) GetTokenPoolByID(ctx context.Context, id *fftypes.UUID) (*core.TokenPool, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetTokenPoolByID") + } + + var r0 *core.TokenPool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (*core.TokenPool, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) *core.TokenPool); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.TokenPool) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTokenPoolByLocator provides a mock function with given fields: ctx, connector, poolLocator +func (_m *Manager) GetTokenPoolByLocator(ctx context.Context, connector string, poolLocator string) (*core.TokenPool, error) { + ret := _m.Called(ctx, connector, poolLocator) + + if len(ret) == 0 { + panic("no return value specified for GetTokenPoolByLocator") + } var r0 *core.TokenPool var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.TokenPool, error)); ok { - return rf(ctx, connector, poolName) + 
return rf(ctx, connector, poolLocator) } if rf, ok := ret.Get(0).(func(context.Context, string, string) *core.TokenPool); ok { - r0 = rf(ctx, connector, poolName) + r0 = rf(ctx, connector, poolLocator) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*core.TokenPool) @@ -260,7 +344,7 @@ func (_m *Manager) GetTokenPool(ctx context.Context, connector string, poolName } if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { - r1 = rf(ctx, connector, poolName) + r1 = rf(ctx, connector, poolLocator) } else { r1 = ret.Error(1) } @@ -272,6 +356,10 @@ func (_m *Manager) GetTokenPool(ctx context.Context, connector string, poolName func (_m *Manager) GetTokenPoolByNameOrID(ctx context.Context, poolNameOrID string) (*core.TokenPool, error) { ret := _m.Called(ctx, poolNameOrID) + if len(ret) == 0 { + panic("no return value specified for GetTokenPoolByNameOrID") + } + var r0 *core.TokenPool var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.TokenPool, error)); ok { @@ -298,6 +386,10 @@ func (_m *Manager) GetTokenPoolByNameOrID(ctx context.Context, poolNameOrID stri func (_m *Manager) GetTokenPools(ctx context.Context, filter ffapi.AndFilter) ([]*core.TokenPool, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenPools") + } + var r0 []*core.TokenPool var r1 *ffapi.FilterResult var r2 error @@ -333,6 +425,10 @@ func (_m *Manager) GetTokenPools(ctx context.Context, filter ffapi.AndFilter) ([ func (_m *Manager) GetTokenTransferByID(ctx context.Context, id string) (*core.TokenTransfer, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetTokenTransferByID") + } + var r0 *core.TokenTransfer var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.TokenTransfer, error)); ok { @@ -359,6 +455,10 @@ func (_m *Manager) GetTokenTransferByID(ctx context.Context, id string) (*core.T func (_m *Manager) GetTokenTransfers(ctx context.Context, filter ffapi.AndFilter) ([]*core.TokenTransfer, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenTransfers") + } + var r0 []*core.TokenTransfer var r1 *ffapi.FilterResult var r2 error @@ -394,6 +494,10 @@ func (_m *Manager) GetTokenTransfers(ctx context.Context, filter ffapi.AndFilter func (_m *Manager) MintTokens(ctx context.Context, transfer *core.TokenTransferInput, waitConfirm bool) (*core.TokenTransfer, error) { ret := _m.Called(ctx, transfer, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for MintTokens") + } + var r0 *core.TokenTransfer var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenTransferInput, bool) (*core.TokenTransfer, error)); ok { @@ -420,6 +524,10 @@ func (_m *Manager) MintTokens(ctx context.Context, transfer *core.TokenTransferI func (_m *Manager) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -434,6 +542,10 @@ func (_m *Manager) Name() string { func (_m *Manager) NewApproval(approve *core.TokenApprovalInput) syncasync.Sender { ret := _m.Called(approve) + if len(ret) == 0 { + panic("no return value specified for NewApproval") + } + var r0 syncasync.Sender if rf, ok := ret.Get(0).(func(*core.TokenApprovalInput) syncasync.Sender); ok { r0 = rf(approve) @@ -450,6 +562,10 @@ func (_m *Manager) NewApproval(approve 
*core.TokenApprovalInput) syncasync.Sende func (_m *Manager) NewTransfer(transfer *core.TokenTransferInput) syncasync.Sender { ret := _m.Called(transfer) + if len(ret) == 0 { + panic("no return value specified for NewTransfer") + } + var r0 syncasync.Sender if rf, ok := ret.Get(0).(func(*core.TokenTransferInput) syncasync.Sender); ok { r0 = rf(transfer) @@ -466,6 +582,10 @@ func (_m *Manager) NewTransfer(transfer *core.TokenTransferInput) syncasync.Send func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for PrepareOperation") + } + var r0 *core.PreparedOperation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Operation) (*core.PreparedOperation, error)); ok { @@ -492,6 +612,10 @@ func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*c func (_m *Manager) ResolvePoolMethods(ctx context.Context, pool *core.TokenPool) error { ret := _m.Called(ctx, pool) + if len(ret) == 0 { + panic("no return value specified for ResolvePoolMethods") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool) error); ok { r0 = rf(ctx, pool) @@ -503,13 +627,17 @@ func (_m *Manager) ResolvePoolMethods(ctx context.Context, pool *core.TokenPool) } // RunOperation provides a mock function with given fields: ctx, op -func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, bool, error) { +func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for RunOperation") + } + var r0 fftypes.JSONObject - var r1 bool + var r1 core.OpPhase var r2 error - if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) (fftypes.JSONObject, bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error)); ok { return rf(ctx, op) } if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) fftypes.JSONObject); ok { @@ -520,10 +648,10 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) } } - if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation) bool); ok { + if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation) core.OpPhase); ok { r1 = rf(ctx, op) } else { - r1 = ret.Get(1).(bool) + r1 = ret.Get(1).(core.OpPhase) } if rf, ok := ret.Get(2).(func(context.Context, *core.PreparedOperation) error); ok { @@ -539,6 +667,10 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) func (_m *Manager) TokenApproval(ctx context.Context, approval *core.TokenApprovalInput, waitConfirm bool) (*core.TokenApproval, error) { ret := _m.Called(ctx, approval, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for TokenApproval") + } + var r0 *core.TokenApproval var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenApprovalInput, bool) (*core.TokenApproval, error)); ok { @@ -565,6 +697,10 @@ func (_m *Manager) TokenApproval(ctx context.Context, approval *core.TokenApprov func (_m *Manager) TransferTokens(ctx context.Context, transfer *core.TokenTransferInput, waitConfirm bool) (*core.TokenTransfer, error) { ret := _m.Called(ctx, transfer, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for TransferTokens") + } + var 
r0 *core.TokenTransfer var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenTransferInput, bool) (*core.TokenTransfer, error)); ok { @@ -587,13 +723,12 @@ func (_m *Manager) TransferTokens(ctx context.Context, transfer *core.TokenTrans return r0, r1 } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/batchmocks/manager.go b/mocks/batchmocks/manager.go index a7cf062bb6..c938d70a7c 100644 --- a/mocks/batchmocks/manager.go +++ b/mocks/batchmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package batchmocks @@ -26,6 +26,10 @@ func (_m *Manager) Close() { func (_m *Manager) LoadContexts(ctx context.Context, payload *batch.DispatchPayload) error { ret := _m.Called(ctx, payload) + if len(ret) == 0 { + panic("no return value specified for LoadContexts") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *batch.DispatchPayload) error); ok { r0 = rf(ctx, payload) @@ -40,6 +44,10 @@ func (_m *Manager) LoadContexts(ctx context.Context, payload *batch.DispatchPayl func (_m *Manager) NewMessages() chan<- int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewMessages") + } + var r0 chan<- int64 if rf, ok := ret.Get(0).(func() chan<- int64); ok { r0 = rf() @@ -61,6 +69,10 @@ func (_m *Manager) RegisterDispatcher(name string, txType fftypes.FFEnum, msgTyp func (_m *Manager) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -75,6 +87,10 @@ func (_m *Manager) Start() error { func (_m *Manager) Status() *batch.ManagerStatus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 *batch.ManagerStatus if rf, ok := ret.Get(0).(func() *batch.ManagerStatus); ok { r0 = rf() @@ -92,13 +108,12 @@ func (_m *Manager) WaitStop() { _m.Called() } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/blockchaincommonmocks/firefly_subscriptions.go b/mocks/blockchaincommonmocks/firefly_subscriptions.go index 298e3a4058..c8ce823660 100644 --- a/mocks/blockchaincommonmocks/firefly_subscriptions.go +++ b/mocks/blockchaincommonmocks/firefly_subscriptions.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
package blockchaincommonmocks @@ -26,6 +26,10 @@ func (_m *FireflySubscriptions) AddSubscription(ctx context.Context, namespace * func (_m *FireflySubscriptions) GetSubscription(subID string) *common.SubscriptionInfo { ret := _m.Called(subID) + if len(ret) == 0 { + panic("no return value specified for GetSubscription") + } + var r0 *common.SubscriptionInfo if rf, ok := ret.Get(0).(func(string) *common.SubscriptionInfo); ok { r0 = rf(subID) @@ -43,13 +47,12 @@ func (_m *FireflySubscriptions) RemoveSubscription(ctx context.Context, subID st _m.Called(ctx, subID) } -type mockConstructorTestingTNewFireflySubscriptions interface { +// NewFireflySubscriptions creates a new instance of FireflySubscriptions. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFireflySubscriptions(t interface { mock.TestingT Cleanup(func()) -} - -// NewFireflySubscriptions creates a new instance of FireflySubscriptions. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewFireflySubscriptions(t mockConstructorTestingTNewFireflySubscriptions) *FireflySubscriptions { +}) *FireflySubscriptions { mock := &FireflySubscriptions{} mock.Mock.Test(t) diff --git a/mocks/blockchainmocks/callbacks.go b/mocks/blockchainmocks/callbacks.go index 9ebf2a0d50..0769667f6e 100644 --- a/mocks/blockchainmocks/callbacks.go +++ b/mocks/blockchainmocks/callbacks.go @@ -1,13 +1,9 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package blockchainmocks import ( blockchain "github.com/hyperledger/firefly/pkg/blockchain" - core "github.com/hyperledger/firefly/pkg/core" - - fftypes "github.com/hyperledger/firefly-common/pkg/fftypes" - mock "github.com/stretchr/testify/mock" ) @@ -16,41 +12,17 @@ type Callbacks struct { mock.Mock } -// BatchPinComplete provides a mock function with given fields: namespace, batch, signingKey -func (_m *Callbacks) BatchPinComplete(namespace string, batch *blockchain.BatchPin, signingKey *core.VerifierRef) error { - ret := _m.Called(namespace, batch, signingKey) - - var r0 error - if rf, ok := ret.Get(0).(func(string, *blockchain.BatchPin, *core.VerifierRef) error); ok { - r0 = rf(namespace, batch, signingKey) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// BlockchainEvent provides a mock function with given fields: event -func (_m *Callbacks) BlockchainEvent(event *blockchain.EventWithSubscription) error { - ret := _m.Called(event) +// BlockchainEventBatch provides a mock function with given fields: batch +func (_m *Callbacks) BlockchainEventBatch(batch []*blockchain.EventToDispatch) error { + ret := _m.Called(batch) - var r0 error - if rf, ok := ret.Get(0).(func(*blockchain.EventWithSubscription) error); ok { - r0 = rf(event) - } else { - r0 = ret.Error(0) + if len(ret) == 0 { + panic("no return value specified for BlockchainEventBatch") } - return r0 -} - -// BlockchainNetworkAction provides a mock function with given fields: action, location, event, signingKey -func (_m *Callbacks) BlockchainNetworkAction(action string, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef) error { - ret := _m.Called(action, location, event, signingKey) - var r0 error - if rf, ok := ret.Get(0).(func(string, *fftypes.JSONAny, *blockchain.Event, *core.VerifierRef) error); ok { - r0 = rf(action, location, event, signingKey) + if rf, ok := 
ret.Get(0).(func([]*blockchain.EventToDispatch) error); ok { + r0 = rf(batch) } else { r0 = ret.Error(0) } @@ -58,13 +30,12 @@ func (_m *Callbacks) BlockchainNetworkAction(action string, location *fftypes.JS return r0 } -type mockConstructorTestingTNewCallbacks interface { +// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCallbacks(t interface { mock.TestingT Cleanup(func()) -} - -// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCallbacks(t mockConstructorTestingTNewCallbacks) *Callbacks { +}) *Callbacks { mock := &Callbacks{} mock.Mock.Test(t) diff --git a/mocks/blockchainmocks/plugin.go b/mocks/blockchainmocks/plugin.go index 3f73f26a34..1f08022592 100644 --- a/mocks/blockchainmocks/plugin.go +++ b/mocks/blockchainmocks/plugin.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package blockchainmocks @@ -28,6 +28,10 @@ type Plugin struct { func (_m *Plugin) AddContractListener(ctx context.Context, subscription *core.ContractListener) error { ret := _m.Called(ctx, subscription) + if len(ret) == 0 { + panic("no return value specified for AddContractListener") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.ContractListener) error); ok { r0 = rf(ctx, subscription) @@ -42,6 +46,10 @@ func (_m *Plugin) AddContractListener(ctx context.Context, subscription *core.Co func (_m *Plugin) AddFireflySubscription(ctx context.Context, namespace *core.Namespace, contract *blockchain.MultipartyContract) (string, error) { ret := _m.Called(ctx, namespace, contract) + if len(ret) == 0 { + panic("no return value specified for AddFireflySubscription") + } + var r0 string var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Namespace, *blockchain.MultipartyContract) (string, error)); ok { @@ -66,6 +74,10 @@ func (_m *Plugin) AddFireflySubscription(ctx context.Context, namespace *core.Na func (_m *Plugin) Capabilities() *blockchain.Capabilities { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Capabilities") + } + var r0 *blockchain.Capabilities if rf, ok := ret.Get(0).(func() *blockchain.Capabilities); ok { r0 = rf() @@ -82,6 +94,10 @@ func (_m *Plugin) Capabilities() *blockchain.Capabilities { func (_m *Plugin) DeleteContractListener(ctx context.Context, subscription *core.ContractListener, okNotFound bool) error { ret := _m.Called(ctx, subscription, okNotFound) + if len(ret) == 0 { + panic("no return value specified for DeleteContractListener") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.ContractListener, bool) error); ok { r0 = rf(ctx, subscription, okNotFound) @@ -93,23 +109,41 @@ func (_m *Plugin) DeleteContractListener(ctx context.Context, subscription *core } // DeployContract provides a mock function with given fields: ctx, nsOpID, signingKey, definition, contract, input, options -func (_m *Plugin) DeployContract(ctx context.Context, nsOpID string, signingKey string, definition *fftypes.JSONAny, contract *fftypes.JSONAny, input []interface{}, options map[string]interface{}) error { +func (_m *Plugin) DeployContract(ctx context.Context, nsOpID string, signingKey string, definition *fftypes.JSONAny, contract *fftypes.JSONAny, input []interface{}, 
options map[string]interface{}) (bool, error) { ret := _m.Called(ctx, nsOpID, signingKey, definition, contract, input, options) - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, *fftypes.JSONAny, *fftypes.JSONAny, []interface{}, map[string]interface{}) error); ok { + if len(ret) == 0 { + panic("no return value specified for DeployContract") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, *fftypes.JSONAny, *fftypes.JSONAny, []interface{}, map[string]interface{}) (bool, error)); ok { + return rf(ctx, nsOpID, signingKey, definition, contract, input, options) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, *fftypes.JSONAny, *fftypes.JSONAny, []interface{}, map[string]interface{}) bool); ok { r0 = rf(ctx, nsOpID, signingKey, definition, contract, input, options) } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(bool) } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, string, string, *fftypes.JSONAny, *fftypes.JSONAny, []interface{}, map[string]interface{}) error); ok { + r1 = rf(ctx, nsOpID, signingKey, definition, contract, input, options) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // GenerateErrorSignature provides a mock function with given fields: ctx, errorDef func (_m *Plugin) GenerateErrorSignature(ctx context.Context, errorDef *fftypes.FFIErrorDefinition) string { ret := _m.Called(ctx, errorDef) + if len(ret) == 0 { + panic("no return value specified for GenerateErrorSignature") + } + var r0 string if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIErrorDefinition) string); ok { r0 = rf(ctx, errorDef) @@ -124,6 +158,10 @@ func (_m *Plugin) GenerateErrorSignature(ctx context.Context, errorDef *fftypes. func (_m *Plugin) GenerateEventSignature(ctx context.Context, event *fftypes.FFIEventDefinition) string { ret := _m.Called(ctx, event) + if len(ret) == 0 { + panic("no return value specified for GenerateEventSignature") + } + var r0 string if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIEventDefinition) string); ok { r0 = rf(ctx, event) @@ -138,6 +176,10 @@ func (_m *Plugin) GenerateEventSignature(ctx context.Context, event *fftypes.FFI func (_m *Plugin) GenerateFFI(ctx context.Context, generationRequest *fftypes.FFIGenerationRequest) (*fftypes.FFI, error) { ret := _m.Called(ctx, generationRequest) + if len(ret) == 0 { + panic("no return value specified for GenerateFFI") + } + var r0 *fftypes.FFI var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIGenerationRequest) (*fftypes.FFI, error)); ok { @@ -164,6 +206,10 @@ func (_m *Plugin) GenerateFFI(ctx context.Context, generationRequest *fftypes.FF func (_m *Plugin) GetAndConvertDeprecatedContractConfig(ctx context.Context) (*fftypes.JSONAny, string, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetAndConvertDeprecatedContractConfig") + } + var r0 *fftypes.JSONAny var r1 string var r2 error @@ -197,6 +243,10 @@ func (_m *Plugin) GetAndConvertDeprecatedContractConfig(ctx context.Context) (*f func (_m *Plugin) GetContractListenerStatus(ctx context.Context, subID string, okNotFound bool) (bool, interface{}, error) { ret := _m.Called(ctx, subID, okNotFound) + if len(ret) == 0 { + panic("no return value specified for GetContractListenerStatus") + } + var r0 bool var r1 interface{} var r2 error @@ -230,6 +280,10 @@ func (_m *Plugin) GetContractListenerStatus(ctx context.Context, subID string, o func (_m *Plugin) GetFFIParamValidator(ctx 
context.Context) (fftypes.FFIParamValidator, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetFFIParamValidator") + } + var r0 fftypes.FFIParamValidator var r1 error if rf, ok := ret.Get(0).(func(context.Context) (fftypes.FFIParamValidator, error)); ok { @@ -256,6 +310,10 @@ func (_m *Plugin) GetFFIParamValidator(ctx context.Context) (fftypes.FFIParamVal func (_m *Plugin) GetNetworkVersion(ctx context.Context, location *fftypes.JSONAny) (int, error) { ret := _m.Called(ctx, location) + if len(ret) == 0 { + panic("no return value specified for GetNetworkVersion") + } + var r0 int var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.JSONAny) (int, error)); ok { @@ -280,6 +338,10 @@ func (_m *Plugin) GetNetworkVersion(ctx context.Context, location *fftypes.JSONA func (_m *Plugin) GetTransactionStatus(ctx context.Context, operation *core.Operation) (interface{}, error) { ret := _m.Called(ctx, operation) + if len(ret) == 0 { + panic("no return value specified for GetTransactionStatus") + } + var r0 interface{} var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Operation) (interface{}, error)); ok { @@ -306,6 +368,10 @@ func (_m *Plugin) GetTransactionStatus(ctx context.Context, operation *core.Oper func (_m *Plugin) Init(ctx context.Context, cancelCtx context.CancelFunc, _a2 config.Section, _a3 metrics.Manager, cacheManager cache.Manager) error { ret := _m.Called(ctx, cancelCtx, _a2, _a3, cacheManager) + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, context.CancelFunc, config.Section, metrics.Manager, cache.Manager) error); ok { r0 = rf(ctx, cancelCtx, _a2, _a3, cacheManager) @@ -321,24 +387,42 @@ func (_m *Plugin) InitConfig(_a0 config.Section) { _m.Called(_a0) } -// InvokeContract provides a mock function with given fields: ctx, nsOpID, signingKey, location, method, input, errors, options, batch -func (_m *Plugin) InvokeContract(ctx context.Context, nsOpID string, signingKey string, location *fftypes.JSONAny, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, options map[string]interface{}, batch *blockchain.BatchPin) error { - ret := _m.Called(ctx, nsOpID, signingKey, location, method, input, errors, options, batch) +// InvokeContract provides a mock function with given fields: ctx, nsOpID, signingKey, location, parsedMethod, input, options, batch +func (_m *Plugin) InvokeContract(ctx context.Context, nsOpID string, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}, batch *blockchain.BatchPin) (bool, error) { + ret := _m.Called(ctx, nsOpID, signingKey, location, parsedMethod, input, options, batch) - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, *fftypes.JSONAny, *fftypes.FFIMethod, map[string]interface{}, []*fftypes.FFIError, map[string]interface{}, *blockchain.BatchPin) error); ok { - r0 = rf(ctx, nsOpID, signingKey, location, method, input, errors, options, batch) + if len(ret) == 0 { + panic("no return value specified for InvokeContract") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, *fftypes.JSONAny, interface{}, map[string]interface{}, map[string]interface{}, *blockchain.BatchPin) (bool, error)); ok { + return rf(ctx, nsOpID, signingKey, location, parsedMethod, input, options, batch) + } + if rf, ok := 
ret.Get(0).(func(context.Context, string, string, *fftypes.JSONAny, interface{}, map[string]interface{}, map[string]interface{}, *blockchain.BatchPin) bool); ok { + r0 = rf(ctx, nsOpID, signingKey, location, parsedMethod, input, options, batch) } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(bool) } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, string, string, *fftypes.JSONAny, interface{}, map[string]interface{}, map[string]interface{}, *blockchain.BatchPin) error); ok { + r1 = rf(ctx, nsOpID, signingKey, location, parsedMethod, input, options, batch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // Name provides a mock function with given fields: func (_m *Plugin) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -353,6 +437,10 @@ func (_m *Plugin) Name() string { func (_m *Plugin) NormalizeContractLocation(ctx context.Context, ntype blockchain.NormalizeType, location *fftypes.JSONAny) (*fftypes.JSONAny, error) { ret := _m.Called(ctx, ntype, location) + if len(ret) == 0 { + panic("no return value specified for NormalizeContractLocation") + } + var r0 *fftypes.JSONAny var r1 error if rf, ok := ret.Get(0).(func(context.Context, blockchain.NormalizeType, *fftypes.JSONAny) (*fftypes.JSONAny, error)); ok { @@ -375,25 +463,59 @@ func (_m *Plugin) NormalizeContractLocation(ctx context.Context, ntype blockchai return r0, r1 } -// QueryContract provides a mock function with given fields: ctx, signingKey, location, method, input, errors, options -func (_m *Plugin) QueryContract(ctx context.Context, signingKey string, location *fftypes.JSONAny, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, options map[string]interface{}) (interface{}, error) { - ret := _m.Called(ctx, signingKey, location, method, input, errors, options) +// ParseInterface provides a mock function with given fields: ctx, method, errors +func (_m *Plugin) ParseInterface(ctx context.Context, method *fftypes.FFIMethod, errors []*fftypes.FFIError) (interface{}, error) { + ret := _m.Called(ctx, method, errors) + + if len(ret) == 0 { + panic("no return value specified for ParseInterface") + } var r0 interface{} var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.JSONAny, *fftypes.FFIMethod, map[string]interface{}, []*fftypes.FFIError, map[string]interface{}) (interface{}, error)); ok { - return rf(ctx, signingKey, location, method, input, errors, options) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIMethod, []*fftypes.FFIError) (interface{}, error)); ok { + return rf(ctx, method, errors) } - if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.JSONAny, *fftypes.FFIMethod, map[string]interface{}, []*fftypes.FFIError, map[string]interface{}) interface{}); ok { - r0 = rf(ctx, signingKey, location, method, input, errors, options) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIMethod, []*fftypes.FFIError) interface{}); ok { + r0 = rf(ctx, method, errors) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(interface{}) } } - if rf, ok := ret.Get(1).(func(context.Context, string, *fftypes.JSONAny, *fftypes.FFIMethod, map[string]interface{}, []*fftypes.FFIError, map[string]interface{}) error); ok { - r1 = rf(ctx, signingKey, location, method, input, errors, options) + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.FFIMethod, []*fftypes.FFIError) error); ok { + r1 = rf(ctx, method, 
errors) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryContract provides a mock function with given fields: ctx, signingKey, location, parsedMethod, input, options +func (_m *Plugin) QueryContract(ctx context.Context, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}) (interface{}, error) { + ret := _m.Called(ctx, signingKey, location, parsedMethod, input, options) + + if len(ret) == 0 { + panic("no return value specified for QueryContract") + } + + var r0 interface{} + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.JSONAny, interface{}, map[string]interface{}, map[string]interface{}) (interface{}, error)); ok { + return rf(ctx, signingKey, location, parsedMethod, input, options) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.JSONAny, interface{}, map[string]interface{}, map[string]interface{}) interface{}); ok { + r0 = rf(ctx, signingKey, location, parsedMethod, input, options) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *fftypes.JSONAny, interface{}, map[string]interface{}, map[string]interface{}) error); ok { + r1 = rf(ctx, signingKey, location, parsedMethod, input, options) } else { r1 = ret.Error(1) } @@ -410,6 +532,10 @@ func (_m *Plugin) RemoveFireflySubscription(ctx context.Context, subID string) { func (_m *Plugin) ResolveSigningKey(ctx context.Context, keyRef string, intent blockchain.ResolveKeyIntent) (string, error) { ret := _m.Called(ctx, keyRef, intent) + if len(ret) == 0 { + panic("no return value specified for ResolveSigningKey") + } + var r0 string var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, blockchain.ResolveKeyIntent) (string, error)); ok { @@ -444,6 +570,10 @@ func (_m *Plugin) SetOperationHandler(namespace string, handler core.OperationCa func (_m *Plugin) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -458,6 +588,10 @@ func (_m *Plugin) Start() error { func (_m *Plugin) SubmitBatchPin(ctx context.Context, nsOpID string, networkNamespace string, signingKey string, batch *blockchain.BatchPin, location *fftypes.JSONAny) error { ret := _m.Called(ctx, nsOpID, networkNamespace, signingKey, batch, location) + if len(ret) == 0 { + panic("no return value specified for SubmitBatchPin") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *blockchain.BatchPin, *fftypes.JSONAny) error); ok { r0 = rf(ctx, nsOpID, networkNamespace, signingKey, batch, location) @@ -472,6 +606,10 @@ func (_m *Plugin) SubmitBatchPin(ctx context.Context, nsOpID string, networkName func (_m *Plugin) SubmitNetworkAction(ctx context.Context, nsOpID string, signingKey string, action fftypes.FFEnum, location *fftypes.JSONAny) error { ret := _m.Called(ctx, nsOpID, signingKey, action, location) + if len(ret) == 0 { + panic("no return value specified for SubmitNetworkAction") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, fftypes.FFEnum, *fftypes.JSONAny) error); ok { r0 = rf(ctx, nsOpID, signingKey, action, location) @@ -482,13 +620,17 @@ func (_m *Plugin) SubmitNetworkAction(ctx context.Context, nsOpID string, signin return r0 } -// ValidateInvokeRequest provides a mock function with given fields: ctx, method, input, errors, hasMessage 
-func (_m *Plugin) ValidateInvokeRequest(ctx context.Context, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, hasMessage bool) error { - ret := _m.Called(ctx, method, input, errors, hasMessage) +// ValidateInvokeRequest provides a mock function with given fields: ctx, parsedMethod, input, hasMessage +func (_m *Plugin) ValidateInvokeRequest(ctx context.Context, parsedMethod interface{}, input map[string]interface{}, hasMessage bool) error { + ret := _m.Called(ctx, parsedMethod, input, hasMessage) + + if len(ret) == 0 { + panic("no return value specified for ValidateInvokeRequest") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIMethod, map[string]interface{}, []*fftypes.FFIError, bool) error); ok { - r0 = rf(ctx, method, input, errors, hasMessage) + if rf, ok := ret.Get(0).(func(context.Context, interface{}, map[string]interface{}, bool) error); ok { + r0 = rf(ctx, parsedMethod, input, hasMessage) } else { r0 = ret.Error(0) } @@ -500,6 +642,10 @@ func (_m *Plugin) ValidateInvokeRequest(ctx context.Context, method *fftypes.FFI func (_m *Plugin) VerifierType() fftypes.FFEnum { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for VerifierType") + } + var r0 fftypes.FFEnum if rf, ok := ret.Get(0).(func() fftypes.FFEnum); ok { r0 = rf() @@ -510,13 +656,12 @@ func (_m *Plugin) VerifierType() fftypes.FFEnum { return r0 } -type mockConstructorTestingTNewPlugin interface { +// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPlugin(t interface { mock.TestingT Cleanup(func()) -} - -// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPlugin(t mockConstructorTestingTNewPlugin) *Plugin { +}) *Plugin { mock := &Plugin{} mock.Mock.Test(t) diff --git a/mocks/broadcastmocks/manager.go b/mocks/broadcastmocks/manager.go index dd7f67dabe..76b9fcefc5 100644 --- a/mocks/broadcastmocks/manager.go +++ b/mocks/broadcastmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
package broadcastmocks @@ -22,6 +22,10 @@ type Manager struct { func (_m *Manager) BroadcastMessage(ctx context.Context, in *core.MessageInOut, waitConfirm bool) (*core.Message, error) { ret := _m.Called(ctx, in, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for BroadcastMessage") + } + var r0 *core.Message var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.MessageInOut, bool) (*core.Message, error)); ok { @@ -48,6 +52,10 @@ func (_m *Manager) BroadcastMessage(ctx context.Context, in *core.MessageInOut, func (_m *Manager) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -62,6 +70,10 @@ func (_m *Manager) Name() string { func (_m *Manager) NewBroadcast(in *core.MessageInOut) syncasync.Sender { ret := _m.Called(in) + if len(ret) == 0 { + panic("no return value specified for NewBroadcast") + } + var r0 syncasync.Sender if rf, ok := ret.Get(0).(func(*core.MessageInOut) syncasync.Sender); ok { r0 = rf(in) @@ -78,6 +90,10 @@ func (_m *Manager) NewBroadcast(in *core.MessageInOut) syncasync.Sender { func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for PrepareOperation") + } + var r0 *core.PreparedOperation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Operation) (*core.PreparedOperation, error)); ok { @@ -104,6 +120,10 @@ func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*c func (_m *Manager) PublishDataBlob(ctx context.Context, id string, idempotencyKey core.IdempotencyKey) (*core.Data, error) { ret := _m.Called(ctx, id, idempotencyKey) + if len(ret) == 0 { + panic("no return value specified for PublishDataBlob") + } + var r0 *core.Data var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, core.IdempotencyKey) (*core.Data, error)); ok { @@ -130,6 +150,10 @@ func (_m *Manager) PublishDataBlob(ctx context.Context, id string, idempotencyKe func (_m *Manager) PublishDataValue(ctx context.Context, id string, idempotencyKey core.IdempotencyKey) (*core.Data, error) { ret := _m.Called(ctx, id, idempotencyKey) + if len(ret) == 0 { + panic("no return value specified for PublishDataValue") + } + var r0 *core.Data var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, core.IdempotencyKey) (*core.Data, error)); ok { @@ -153,13 +177,17 @@ func (_m *Manager) PublishDataValue(ctx context.Context, id string, idempotencyK } // RunOperation provides a mock function with given fields: ctx, op -func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, bool, error) { +func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for RunOperation") + } + var r0 fftypes.JSONObject - var r1 bool + var r1 core.OpPhase var r2 error - if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) (fftypes.JSONObject, bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error)); ok { return rf(ctx, op) } if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) fftypes.JSONObject); ok { @@ -170,10 +198,10 @@ func (_m *Manager) RunOperation(ctx context.Context, 
op *core.PreparedOperation) } } - if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation) bool); ok { + if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation) core.OpPhase); ok { r1 = rf(ctx, op) } else { - r1 = ret.Get(1).(bool) + r1 = ret.Get(1).(core.OpPhase) } if rf, ok := ret.Get(2).(func(context.Context, *core.PreparedOperation) error); ok { @@ -189,6 +217,10 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) func (_m *Manager) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -204,13 +236,12 @@ func (_m *Manager) WaitStop() { _m.Called() } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/cachemocks/manager.go b/mocks/cachemocks/manager.go index 9cab453f41..d8569e2497 100644 --- a/mocks/cachemocks/manager.go +++ b/mocks/cachemocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package cachemocks @@ -16,6 +16,10 @@ type Manager struct { func (_m *Manager) GetCache(cc *cache.CConfig) (cache.CInterface, error) { ret := _m.Called(cc) + if len(ret) == 0 { + panic("no return value specified for GetCache") + } + var r0 cache.CInterface var r1 error if rf, ok := ret.Get(0).(func(*cache.CConfig) (cache.CInterface, error)); ok { @@ -38,13 +42,17 @@ func (_m *Manager) GetCache(cc *cache.CConfig) (cache.CInterface, error) { return r0, r1 } -// ListKeys provides a mock function with given fields: -func (_m *Manager) ListKeys() []string { - ret := _m.Called() +// ListCacheNames provides a mock function with given fields: namespace +func (_m *Manager) ListCacheNames(namespace string) []string { + ret := _m.Called(namespace) + + if len(ret) == 0 { + panic("no return value specified for ListCacheNames") + } var r0 []string - if rf, ok := ret.Get(0).(func() []string); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(string) []string); ok { + r0 = rf(namespace) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]string) @@ -59,13 +67,12 @@ func (_m *Manager) ResetCachesForNamespace(ns string) { _m.Called(ns) } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/contractmocks/manager.go b/mocks/contractmocks/manager.go index e4ac41aaf8..042061c6f1 100644 --- a/mocks/contractmocks/manager.go +++ b/mocks/contractmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package contractmocks @@ -23,6 +23,10 @@ type Manager struct { func (_m *Manager) AddContractAPIListener(ctx context.Context, apiName string, eventPath string, listener *core.ContractListener) (*core.ContractListener, error) { ret := _m.Called(ctx, apiName, eventPath, listener) + if len(ret) == 0 { + panic("no return value specified for AddContractAPIListener") + } + var r0 *core.ContractListener var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string, *core.ContractListener) (*core.ContractListener, error)); ok { @@ -49,6 +53,10 @@ func (_m *Manager) AddContractAPIListener(ctx context.Context, apiName string, e func (_m *Manager) AddContractListener(ctx context.Context, listener *core.ContractListenerInput) (*core.ContractListener, error) { ret := _m.Called(ctx, listener) + if len(ret) == 0 { + panic("no return value specified for AddContractListener") + } + var r0 *core.ContractListener var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.ContractListenerInput) (*core.ContractListener, error)); ok { @@ -71,10 +79,32 @@ func (_m *Manager) AddContractListener(ctx context.Context, listener *core.Contr return r0, r1 } +// DeleteContractAPI provides a mock function with given fields: ctx, apiName +func (_m *Manager) DeleteContractAPI(ctx context.Context, apiName string) error { + ret := _m.Called(ctx, apiName) + + if len(ret) == 0 { + panic("no return value specified for DeleteContractAPI") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, apiName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // DeleteContractListenerByNameOrID provides a mock function with given fields: ctx, nameOrID func (_m *Manager) DeleteContractListenerByNameOrID(ctx context.Context, nameOrID string) error { ret := _m.Called(ctx, nameOrID) + if len(ret) == 0 { + panic("no return value specified for DeleteContractListenerByNameOrID") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, nameOrID) @@ -85,10 +115,32 @@ func (_m *Manager) DeleteContractListenerByNameOrID(ctx context.Context, nameOrI return r0 } +// DeleteFFI provides a mock function with given fields: ctx, id +func (_m *Manager) DeleteFFI(ctx context.Context, id *fftypes.UUID) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for DeleteFFI") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // DeployContract provides a mock function with given fields: ctx, req, waitConfirm func (_m *Manager) DeployContract(ctx context.Context, req *core.ContractDeployRequest, waitConfirm bool) (interface{}, error) { ret := _m.Called(ctx, req, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for DeployContract") + } + var r0 interface{} var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.ContractDeployRequest, bool) (interface{}, error)); ok { @@ -115,6 +167,10 @@ func (_m *Manager) DeployContract(ctx context.Context, req 
*core.ContractDeployR func (_m *Manager) GenerateFFI(ctx context.Context, generationRequest *fftypes.FFIGenerationRequest) (*fftypes.FFI, error) { ret := _m.Called(ctx, generationRequest) + if len(ret) == 0 { + panic("no return value specified for GenerateFFI") + } + var r0 *fftypes.FFI var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIGenerationRequest) (*fftypes.FFI, error)); ok { @@ -141,6 +197,10 @@ func (_m *Manager) GenerateFFI(ctx context.Context, generationRequest *fftypes.F func (_m *Manager) GetContractAPI(ctx context.Context, httpServerURL string, apiName string) (*core.ContractAPI, error) { ret := _m.Called(ctx, httpServerURL, apiName) + if len(ret) == 0 { + panic("no return value specified for GetContractAPI") + } + var r0 *core.ContractAPI var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.ContractAPI, error)); ok { @@ -167,6 +227,10 @@ func (_m *Manager) GetContractAPI(ctx context.Context, httpServerURL string, api func (_m *Manager) GetContractAPIInterface(ctx context.Context, apiName string) (*fftypes.FFI, error) { ret := _m.Called(ctx, apiName) + if len(ret) == 0 { + panic("no return value specified for GetContractAPIInterface") + } + var r0 *fftypes.FFI var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*fftypes.FFI, error)); ok { @@ -193,6 +257,10 @@ func (_m *Manager) GetContractAPIInterface(ctx context.Context, apiName string) func (_m *Manager) GetContractAPIListeners(ctx context.Context, apiName string, eventPath string, filter ffapi.AndFilter) ([]*core.ContractListener, *ffapi.FilterResult, error) { ret := _m.Called(ctx, apiName, eventPath, filter) + if len(ret) == 0 { + panic("no return value specified for GetContractAPIListeners") + } + var r0 []*core.ContractListener var r1 *ffapi.FilterResult var r2 error @@ -228,6 +296,10 @@ func (_m *Manager) GetContractAPIListeners(ctx context.Context, apiName string, func (_m *Manager) GetContractAPIs(ctx context.Context, httpServerURL string, filter ffapi.AndFilter) ([]*core.ContractAPI, *ffapi.FilterResult, error) { ret := _m.Called(ctx, httpServerURL, filter) + if len(ret) == 0 { + panic("no return value specified for GetContractAPIs") + } + var r0 []*core.ContractAPI var r1 *ffapi.FilterResult var r2 error @@ -263,6 +335,10 @@ func (_m *Manager) GetContractAPIs(ctx context.Context, httpServerURL string, fi func (_m *Manager) GetContractListenerByNameOrID(ctx context.Context, nameOrID string) (*core.ContractListener, error) { ret := _m.Called(ctx, nameOrID) + if len(ret) == 0 { + panic("no return value specified for GetContractListenerByNameOrID") + } + var r0 *core.ContractListener var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.ContractListener, error)); ok { @@ -289,6 +365,10 @@ func (_m *Manager) GetContractListenerByNameOrID(ctx context.Context, nameOrID s func (_m *Manager) GetContractListenerByNameOrIDWithStatus(ctx context.Context, nameOrID string) (*core.ContractListenerWithStatus, error) { ret := _m.Called(ctx, nameOrID) + if len(ret) == 0 { + panic("no return value specified for GetContractListenerByNameOrIDWithStatus") + } + var r0 *core.ContractListenerWithStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.ContractListenerWithStatus, error)); ok { @@ -315,6 +395,10 @@ func (_m *Manager) GetContractListenerByNameOrIDWithStatus(ctx context.Context, func (_m *Manager) GetContractListeners(ctx context.Context, filter ffapi.AndFilter) ([]*core.ContractListener, *ffapi.FilterResult, 
error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetContractListeners") + } + var r0 []*core.ContractListener var r1 *ffapi.FilterResult var r2 error @@ -350,6 +434,10 @@ func (_m *Manager) GetContractListeners(ctx context.Context, filter ffapi.AndFil func (_m *Manager) GetFFI(ctx context.Context, name string, version string) (*fftypes.FFI, error) { ret := _m.Called(ctx, name, version) + if len(ret) == 0 { + panic("no return value specified for GetFFI") + } + var r0 *fftypes.FFI var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*fftypes.FFI, error)); ok { @@ -376,6 +464,10 @@ func (_m *Manager) GetFFI(ctx context.Context, name string, version string) (*ff func (_m *Manager) GetFFIByID(ctx context.Context, id *fftypes.UUID) (*fftypes.FFI, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetFFIByID") + } + var r0 *fftypes.FFI var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (*fftypes.FFI, error)); ok { @@ -402,6 +494,10 @@ func (_m *Manager) GetFFIByID(ctx context.Context, id *fftypes.UUID) (*fftypes.F func (_m *Manager) GetFFIByIDWithChildren(ctx context.Context, id *fftypes.UUID) (*fftypes.FFI, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetFFIByIDWithChildren") + } + var r0 *fftypes.FFI var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (*fftypes.FFI, error)); ok { @@ -428,6 +524,10 @@ func (_m *Manager) GetFFIByIDWithChildren(ctx context.Context, id *fftypes.UUID) func (_m *Manager) GetFFIEvents(ctx context.Context, id *fftypes.UUID) ([]*fftypes.FFIEvent, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetFFIEvents") + } + var r0 []*fftypes.FFIEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) ([]*fftypes.FFIEvent, error)); ok { @@ -454,6 +554,10 @@ func (_m *Manager) GetFFIEvents(ctx context.Context, id *fftypes.UUID) ([]*fftyp func (_m *Manager) GetFFIMethods(ctx context.Context, id *fftypes.UUID) ([]*fftypes.FFIMethod, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetFFIMethods") + } + var r0 []*fftypes.FFIMethod var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) ([]*fftypes.FFIMethod, error)); ok { @@ -480,6 +584,10 @@ func (_m *Manager) GetFFIMethods(ctx context.Context, id *fftypes.UUID) ([]*ffty func (_m *Manager) GetFFIWithChildren(ctx context.Context, name string, version string) (*fftypes.FFI, error) { ret := _m.Called(ctx, name, version) + if len(ret) == 0 { + panic("no return value specified for GetFFIWithChildren") + } + var r0 *fftypes.FFI var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*fftypes.FFI, error)); ok { @@ -506,6 +614,10 @@ func (_m *Manager) GetFFIWithChildren(ctx context.Context, name string, version func (_m *Manager) GetFFIs(ctx context.Context, filter ffapi.AndFilter) ([]*fftypes.FFI, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetFFIs") + } + var r0 []*fftypes.FFI var r1 *ffapi.FilterResult var r2 error @@ -541,6 +653,10 @@ func (_m *Manager) GetFFIs(ctx context.Context, filter ffapi.AndFilter) ([]*ffty func (_m *Manager) InvokeContract(ctx context.Context, req *core.ContractCallRequest, waitConfirm bool) (interface{}, error) { ret := _m.Called(ctx, req, waitConfirm) + if len(ret) == 0 { 
+ panic("no return value specified for InvokeContract") + } + var r0 interface{} var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.ContractCallRequest, bool) (interface{}, error)); ok { @@ -567,6 +683,10 @@ func (_m *Manager) InvokeContract(ctx context.Context, req *core.ContractCallReq func (_m *Manager) InvokeContractAPI(ctx context.Context, apiName string, methodPath string, req *core.ContractCallRequest, waitConfirm bool) (interface{}, error) { ret := _m.Called(ctx, apiName, methodPath, req, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for InvokeContractAPI") + } + var r0 interface{} var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string, *core.ContractCallRequest, bool) (interface{}, error)); ok { @@ -593,6 +713,10 @@ func (_m *Manager) InvokeContractAPI(ctx context.Context, apiName string, method func (_m *Manager) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -607,6 +731,10 @@ func (_m *Manager) Name() string { func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for PrepareOperation") + } + var r0 *core.PreparedOperation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Operation) (*core.PreparedOperation, error)); ok { @@ -633,6 +761,10 @@ func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*c func (_m *Manager) ResolveContractAPI(ctx context.Context, httpServerURL string, api *core.ContractAPI) error { ret := _m.Called(ctx, httpServerURL, api) + if len(ret) == 0 { + panic("no return value specified for ResolveContractAPI") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *core.ContractAPI) error); ok { r0 = rf(ctx, httpServerURL, api) @@ -647,6 +779,10 @@ func (_m *Manager) ResolveContractAPI(ctx context.Context, httpServerURL string, func (_m *Manager) ResolveFFI(ctx context.Context, ffi *fftypes.FFI) error { ret := _m.Called(ctx, ffi) + if len(ret) == 0 { + panic("no return value specified for ResolveFFI") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFI) error); ok { r0 = rf(ctx, ffi) @@ -661,6 +797,10 @@ func (_m *Manager) ResolveFFI(ctx context.Context, ffi *fftypes.FFI) error { func (_m *Manager) ResolveFFIReference(ctx context.Context, ref *fftypes.FFIReference) error { ret := _m.Called(ctx, ref) + if len(ret) == 0 { + panic("no return value specified for ResolveFFIReference") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIReference) error); ok { r0 = rf(ctx, ref) @@ -672,13 +812,17 @@ func (_m *Manager) ResolveFFIReference(ctx context.Context, ref *fftypes.FFIRefe } // RunOperation provides a mock function with given fields: ctx, op -func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, bool, error) { +func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for RunOperation") + } + var r0 fftypes.JSONObject - var r1 bool + var r1 core.OpPhase var r2 error - if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) (fftypes.JSONObject, bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, 
*core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error)); ok { return rf(ctx, op) } if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) fftypes.JSONObject); ok { @@ -689,10 +833,10 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) } } - if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation) bool); ok { + if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation) core.OpPhase); ok { r1 = rf(ctx, op) } else { - r1 = ret.Get(1).(bool) + r1 = ret.Get(1).(core.OpPhase) } if rf, ok := ret.Get(2).(func(context.Context, *core.PreparedOperation) error); ok { @@ -704,13 +848,12 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) return r0, r1, r2 } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/coremocks/operation_callbacks.go b/mocks/coremocks/operation_callbacks.go index 6be426d078..c628ce6a45 100644 --- a/mocks/coremocks/operation_callbacks.go +++ b/mocks/coremocks/operation_callbacks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package coremocks @@ -17,13 +17,12 @@ func (_m *OperationCallbacks) OperationUpdate(update *core.OperationUpdate) { _m.Called(update) } -type mockConstructorTestingTNewOperationCallbacks interface { +// NewOperationCallbacks creates a new instance of OperationCallbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOperationCallbacks(t interface { mock.TestingT Cleanup(func()) -} - -// NewOperationCallbacks creates a new instance of OperationCallbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewOperationCallbacks(t mockConstructorTestingTNewOperationCallbacks) *OperationCallbacks { +}) *OperationCallbacks { mock := &OperationCallbacks{} mock.Mock.Test(t) diff --git a/mocks/databasemocks/callbacks.go b/mocks/databasemocks/callbacks.go index b694b44fe2..ff790d9eba 100644 --- a/mocks/databasemocks/callbacks.go +++ b/mocks/databasemocks/callbacks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package databasemocks @@ -36,13 +36,12 @@ func (_m *Callbacks) UUIDCollectionNSEvent(resType database.UUIDCollectionNS, ev _m.Called(resType, eventType, namespace, id) } -type mockConstructorTestingTNewCallbacks interface { +// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCallbacks(t interface { mock.TestingT Cleanup(func()) -} - -// NewCallbacks creates a new instance of Callbacks. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCallbacks(t mockConstructorTestingTNewCallbacks) *Callbacks { +}) *Callbacks { mock := &Callbacks{} mock.Mock.Test(t) diff --git a/mocks/databasemocks/plugin.go b/mocks/databasemocks/plugin.go index b0e51b6471..9050baa0b6 100644 --- a/mocks/databasemocks/plugin.go +++ b/mocks/databasemocks/plugin.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package databasemocks @@ -27,6 +27,10 @@ type Plugin struct { func (_m *Plugin) Capabilities() *database.Capabilities { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Capabilities") + } + var r0 *database.Capabilities if rf, ok := ret.Get(0).(func() *database.Capabilities); ok { r0 = rf() @@ -43,6 +47,10 @@ func (_m *Plugin) Capabilities() *database.Capabilities { func (_m *Plugin) DeleteBlob(ctx context.Context, sequence int64) error { ret := _m.Called(ctx, sequence) + if len(ret) == 0 { + panic("no return value specified for DeleteBlob") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { r0 = rf(ctx, sequence) @@ -53,10 +61,32 @@ func (_m *Plugin) DeleteBlob(ctx context.Context, sequence int64) error { return r0 } +// DeleteContractAPI provides a mock function with given fields: ctx, namespace, id +func (_m *Plugin) DeleteContractAPI(ctx context.Context, namespace string, id *fftypes.UUID) error { + ret := _m.Called(ctx, namespace, id) + + if len(ret) == 0 { + panic("no return value specified for DeleteContractAPI") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) error); ok { + r0 = rf(ctx, namespace, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // DeleteContractListenerByID provides a mock function with given fields: ctx, namespace, id func (_m *Plugin) DeleteContractListenerByID(ctx context.Context, namespace string, id *fftypes.UUID) error { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for DeleteContractListenerByID") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) error); ok { r0 = rf(ctx, namespace, id) @@ -71,6 +101,28 @@ func (_m *Plugin) DeleteContractListenerByID(ctx context.Context, namespace stri func (_m *Plugin) DeleteData(ctx context.Context, namespace string, id *fftypes.UUID) error { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for DeleteData") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) error); ok { + r0 = rf(ctx, namespace, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteFFI provides a mock function with given fields: ctx, namespace, id +func (_m *Plugin) DeleteFFI(ctx context.Context, namespace string, id *fftypes.UUID) error { + ret := _m.Called(ctx, namespace, id) + + if len(ret) == 0 { + panic("no return value specified for DeleteFFI") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) error); ok { r0 = rf(ctx, namespace, id) @@ -85,6 +137,10 @@ func (_m *Plugin) DeleteData(ctx context.Context, namespace string, id *fftypes. 
func (_m *Plugin) DeleteNonce(ctx context.Context, hash *fftypes.Bytes32) error { ret := _m.Called(ctx, hash) + if len(ret) == 0 { + panic("no return value specified for DeleteNonce") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Bytes32) error); ok { r0 = rf(ctx, hash) @@ -99,6 +155,10 @@ func (_m *Plugin) DeleteNonce(ctx context.Context, hash *fftypes.Bytes32) error func (_m *Plugin) DeleteOffset(ctx context.Context, t fftypes.FFEnum, name string) error { ret := _m.Called(ctx, t, name) + if len(ret) == 0 { + panic("no return value specified for DeleteOffset") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, fftypes.FFEnum, string) error); ok { r0 = rf(ctx, t, name) @@ -113,6 +173,64 @@ func (_m *Plugin) DeleteOffset(ctx context.Context, t fftypes.FFEnum, name strin func (_m *Plugin) DeleteSubscriptionByID(ctx context.Context, namespace string, id *fftypes.UUID) error { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for DeleteSubscriptionByID") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) error); ok { + r0 = rf(ctx, namespace, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteTokenApprovals provides a mock function with given fields: ctx, namespace, poolID +func (_m *Plugin) DeleteTokenApprovals(ctx context.Context, namespace string, poolID *fftypes.UUID) error { + ret := _m.Called(ctx, namespace, poolID) + + if len(ret) == 0 { + panic("no return value specified for DeleteTokenApprovals") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) error); ok { + r0 = rf(ctx, namespace, poolID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteTokenBalances provides a mock function with given fields: ctx, namespace, poolID +func (_m *Plugin) DeleteTokenBalances(ctx context.Context, namespace string, poolID *fftypes.UUID) error { + ret := _m.Called(ctx, namespace, poolID) + + if len(ret) == 0 { + panic("no return value specified for DeleteTokenBalances") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) error); ok { + r0 = rf(ctx, namespace, poolID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteTokenPool provides a mock function with given fields: ctx, namespace, id +func (_m *Plugin) DeleteTokenPool(ctx context.Context, namespace string, id *fftypes.UUID) error { + ret := _m.Called(ctx, namespace, id) + + if len(ret) == 0 { + panic("no return value specified for DeleteTokenPool") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) error); ok { r0 = rf(ctx, namespace, id) @@ -123,10 +241,32 @@ func (_m *Plugin) DeleteSubscriptionByID(ctx context.Context, namespace string, return r0 } +// DeleteTokenTransfers provides a mock function with given fields: ctx, namespace, poolID +func (_m *Plugin) DeleteTokenTransfers(ctx context.Context, namespace string, poolID *fftypes.UUID) error { + ret := _m.Called(ctx, namespace, poolID) + + if len(ret) == 0 { + panic("no return value specified for DeleteTokenTransfers") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) error); ok { + r0 = rf(ctx, namespace, poolID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // GetBatchByID provides a mock function with given fields: ctx, namespace, id func (_m *Plugin) GetBatchByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.BatchPersisted, error) { 
ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetBatchByID") + } + var r0 *core.BatchPersisted var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.BatchPersisted, error)); ok { @@ -153,6 +293,10 @@ func (_m *Plugin) GetBatchByID(ctx context.Context, namespace string, id *fftype func (_m *Plugin) GetBatchIDsForDataAttachments(ctx context.Context, namespace string, dataIDs []*fftypes.UUID) ([]*fftypes.UUID, error) { ret := _m.Called(ctx, namespace, dataIDs) + if len(ret) == 0 { + panic("no return value specified for GetBatchIDsForDataAttachments") + } + var r0 []*fftypes.UUID var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, []*fftypes.UUID) ([]*fftypes.UUID, error)); ok { @@ -179,6 +323,10 @@ func (_m *Plugin) GetBatchIDsForDataAttachments(ctx context.Context, namespace s func (_m *Plugin) GetBatchIDsForMessages(ctx context.Context, namespace string, msgIDs []*fftypes.UUID) ([]*fftypes.UUID, error) { ret := _m.Called(ctx, namespace, msgIDs) + if len(ret) == 0 { + panic("no return value specified for GetBatchIDsForMessages") + } + var r0 []*fftypes.UUID var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, []*fftypes.UUID) ([]*fftypes.UUID, error)); ok { @@ -205,6 +353,10 @@ func (_m *Plugin) GetBatchIDsForMessages(ctx context.Context, namespace string, func (_m *Plugin) GetBatches(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.BatchPersisted, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetBatches") + } + var r0 []*core.BatchPersisted var r1 *ffapi.FilterResult var r2 error @@ -240,6 +392,10 @@ func (_m *Plugin) GetBatches(ctx context.Context, namespace string, filter ffapi func (_m *Plugin) GetBlobs(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Blob, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetBlobs") + } + var r0 []*core.Blob var r1 *ffapi.FilterResult var r2 error @@ -275,6 +431,10 @@ func (_m *Plugin) GetBlobs(ctx context.Context, namespace string, filter ffapi.F func (_m *Plugin) GetBlockchainEventByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.BlockchainEvent, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetBlockchainEventByID") + } + var r0 *core.BlockchainEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.BlockchainEvent, error)); ok { @@ -301,6 +461,10 @@ func (_m *Plugin) GetBlockchainEventByID(ctx context.Context, namespace string, func (_m *Plugin) GetBlockchainEventByProtocolID(ctx context.Context, namespace string, listener *fftypes.UUID, protocolID string) (*core.BlockchainEvent, error) { ret := _m.Called(ctx, namespace, listener, protocolID) + if len(ret) == 0 { + panic("no return value specified for GetBlockchainEventByProtocolID") + } + var r0 *core.BlockchainEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, string) (*core.BlockchainEvent, error)); ok { @@ -327,6 +491,10 @@ func (_m *Plugin) GetBlockchainEventByProtocolID(ctx context.Context, namespace func (_m *Plugin) GetBlockchainEvents(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.BlockchainEvent, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + 
panic("no return value specified for GetBlockchainEvents") + } + var r0 []*core.BlockchainEvent var r1 *ffapi.FilterResult var r2 error @@ -362,6 +530,10 @@ func (_m *Plugin) GetBlockchainEvents(ctx context.Context, namespace string, fil func (_m *Plugin) GetChartHistogram(ctx context.Context, namespace string, intervals []core.ChartHistogramInterval, collection database.CollectionName) ([]*core.ChartHistogram, error) { ret := _m.Called(ctx, namespace, intervals, collection) + if len(ret) == 0 { + panic("no return value specified for GetChartHistogram") + } + var r0 []*core.ChartHistogram var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, []core.ChartHistogramInterval, database.CollectionName) ([]*core.ChartHistogram, error)); ok { @@ -388,6 +560,10 @@ func (_m *Plugin) GetChartHistogram(ctx context.Context, namespace string, inter func (_m *Plugin) GetContractAPIByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.ContractAPI, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetContractAPIByID") + } + var r0 *core.ContractAPI var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.ContractAPI, error)); ok { @@ -414,6 +590,10 @@ func (_m *Plugin) GetContractAPIByID(ctx context.Context, namespace string, id * func (_m *Plugin) GetContractAPIByName(ctx context.Context, namespace string, name string) (*core.ContractAPI, error) { ret := _m.Called(ctx, namespace, name) + if len(ret) == 0 { + panic("no return value specified for GetContractAPIByName") + } + var r0 *core.ContractAPI var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.ContractAPI, error)); ok { @@ -436,10 +616,44 @@ func (_m *Plugin) GetContractAPIByName(ctx context.Context, namespace string, na return r0, r1 } +// GetContractAPIByNetworkName provides a mock function with given fields: ctx, namespace, networkName +func (_m *Plugin) GetContractAPIByNetworkName(ctx context.Context, namespace string, networkName string) (*core.ContractAPI, error) { + ret := _m.Called(ctx, namespace, networkName) + + if len(ret) == 0 { + panic("no return value specified for GetContractAPIByNetworkName") + } + + var r0 *core.ContractAPI + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.ContractAPI, error)); ok { + return rf(ctx, namespace, networkName) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *core.ContractAPI); ok { + r0 = rf(ctx, namespace, networkName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.ContractAPI) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, namespace, networkName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetContractAPIs provides a mock function with given fields: ctx, namespace, filter func (_m *Plugin) GetContractAPIs(ctx context.Context, namespace string, filter ffapi.AndFilter) ([]*core.ContractAPI, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetContractAPIs") + } + var r0 []*core.ContractAPI var r1 *ffapi.FilterResult var r2 error @@ -475,6 +689,10 @@ func (_m *Plugin) GetContractAPIs(ctx context.Context, namespace string, filter func (_m *Plugin) GetContractListener(ctx context.Context, namespace string, name string) (*core.ContractListener, error) { ret := _m.Called(ctx, namespace, name) + if len(ret) == 0 { + panic("no return value 
specified for GetContractListener") + } + var r0 *core.ContractListener var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.ContractListener, error)); ok { @@ -501,6 +719,10 @@ func (_m *Plugin) GetContractListener(ctx context.Context, namespace string, nam func (_m *Plugin) GetContractListenerByBackendID(ctx context.Context, namespace string, id string) (*core.ContractListener, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetContractListenerByBackendID") + } + var r0 *core.ContractListener var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.ContractListener, error)); ok { @@ -527,6 +749,10 @@ func (_m *Plugin) GetContractListenerByBackendID(ctx context.Context, namespace func (_m *Plugin) GetContractListenerByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.ContractListener, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetContractListenerByID") + } + var r0 *core.ContractListener var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.ContractListener, error)); ok { @@ -553,6 +779,10 @@ func (_m *Plugin) GetContractListenerByID(ctx context.Context, namespace string, func (_m *Plugin) GetContractListeners(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.ContractListener, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetContractListeners") + } + var r0 []*core.ContractListener var r1 *ffapi.FilterResult var r2 error @@ -588,6 +818,10 @@ func (_m *Plugin) GetContractListeners(ctx context.Context, namespace string, fi func (_m *Plugin) GetData(ctx context.Context, namespace string, filter ffapi.Filter) (core.DataArray, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetData") + } + var r0 core.DataArray var r1 *ffapi.FilterResult var r2 error @@ -623,6 +857,10 @@ func (_m *Plugin) GetData(ctx context.Context, namespace string, filter ffapi.Fi func (_m *Plugin) GetDataByID(ctx context.Context, namespace string, id *fftypes.UUID, withValue bool) (*core.Data, error) { ret := _m.Called(ctx, namespace, id, withValue) + if len(ret) == 0 { + panic("no return value specified for GetDataByID") + } + var r0 *core.Data var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, bool) (*core.Data, error)); ok { @@ -649,6 +887,10 @@ func (_m *Plugin) GetDataByID(ctx context.Context, namespace string, id *fftypes func (_m *Plugin) GetDataRefs(ctx context.Context, namespace string, filter ffapi.Filter) (core.DataRefs, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetDataRefs") + } + var r0 core.DataRefs var r1 *ffapi.FilterResult var r2 error @@ -684,6 +926,10 @@ func (_m *Plugin) GetDataRefs(ctx context.Context, namespace string, filter ffap func (_m *Plugin) GetDataSubPaths(ctx context.Context, namespace string, path string) ([]string, error) { ret := _m.Called(ctx, namespace, path) + if len(ret) == 0 { + panic("no return value specified for GetDataSubPaths") + } + var r0 []string var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]string, error)); ok { @@ -710,6 +956,10 @@ func (_m *Plugin) GetDataSubPaths(ctx context.Context, namespace string, path st 
func (_m *Plugin) GetDatatypeByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.Datatype, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetDatatypeByID") + } + var r0 *core.Datatype var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.Datatype, error)); ok { @@ -736,6 +986,10 @@ func (_m *Plugin) GetDatatypeByID(ctx context.Context, namespace string, id *fft func (_m *Plugin) GetDatatypeByName(ctx context.Context, namespace string, name string, version string) (*core.Datatype, error) { ret := _m.Called(ctx, namespace, name, version) + if len(ret) == 0 { + panic("no return value specified for GetDatatypeByName") + } + var r0 *core.Datatype var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (*core.Datatype, error)); ok { @@ -762,6 +1016,10 @@ func (_m *Plugin) GetDatatypeByName(ctx context.Context, namespace string, name func (_m *Plugin) GetDatatypes(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Datatype, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetDatatypes") + } + var r0 []*core.Datatype var r1 *ffapi.FilterResult var r2 error @@ -797,6 +1055,10 @@ func (_m *Plugin) GetDatatypes(ctx context.Context, namespace string, filter ffa func (_m *Plugin) GetEventByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.Event, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetEventByID") + } + var r0 *core.Event var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.Event, error)); ok { @@ -823,6 +1085,10 @@ func (_m *Plugin) GetEventByID(ctx context.Context, namespace string, id *fftype func (_m *Plugin) GetEvents(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Event, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetEvents") + } + var r0 []*core.Event var r1 *ffapi.FilterResult var r2 error @@ -858,6 +1124,10 @@ func (_m *Plugin) GetEvents(ctx context.Context, namespace string, filter ffapi. func (_m *Plugin) GetFFI(ctx context.Context, namespace string, name string, version string) (*fftypes.FFI, error) { ret := _m.Called(ctx, namespace, name, version) + if len(ret) == 0 { + panic("no return value specified for GetFFI") + } + var r0 *fftypes.FFI var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (*fftypes.FFI, error)); ok { @@ -884,6 +1154,10 @@ func (_m *Plugin) GetFFI(ctx context.Context, namespace string, name string, ver func (_m *Plugin) GetFFIByID(ctx context.Context, namespace string, id *fftypes.UUID) (*fftypes.FFI, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetFFIByID") + } + var r0 *fftypes.FFI var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*fftypes.FFI, error)); ok { @@ -906,10 +1180,44 @@ func (_m *Plugin) GetFFIByID(ctx context.Context, namespace string, id *fftypes. 
return r0, r1 } +// GetFFIByNetworkName provides a mock function with given fields: ctx, namespace, networkName, version +func (_m *Plugin) GetFFIByNetworkName(ctx context.Context, namespace string, networkName string, version string) (*fftypes.FFI, error) { + ret := _m.Called(ctx, namespace, networkName, version) + + if len(ret) == 0 { + panic("no return value specified for GetFFIByNetworkName") + } + + var r0 *fftypes.FFI + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (*fftypes.FFI, error)); ok { + return rf(ctx, namespace, networkName, version) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *fftypes.FFI); ok { + r0 = rf(ctx, namespace, networkName, version) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.FFI) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, namespace, networkName, version) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetFFIErrors provides a mock function with given fields: ctx, namespace, filter func (_m *Plugin) GetFFIErrors(ctx context.Context, namespace string, filter ffapi.Filter) ([]*fftypes.FFIError, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetFFIErrors") + } + var r0 []*fftypes.FFIError var r1 *ffapi.FilterResult var r2 error @@ -945,6 +1253,10 @@ func (_m *Plugin) GetFFIErrors(ctx context.Context, namespace string, filter ffa func (_m *Plugin) GetFFIEvent(ctx context.Context, namespace string, interfaceID *fftypes.UUID, pathName string) (*fftypes.FFIEvent, error) { ret := _m.Called(ctx, namespace, interfaceID, pathName) + if len(ret) == 0 { + panic("no return value specified for GetFFIEvent") + } + var r0 *fftypes.FFIEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, string) (*fftypes.FFIEvent, error)); ok { @@ -971,6 +1283,10 @@ func (_m *Plugin) GetFFIEvent(ctx context.Context, namespace string, interfaceID func (_m *Plugin) GetFFIEvents(ctx context.Context, namespace string, filter ffapi.Filter) ([]*fftypes.FFIEvent, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetFFIEvents") + } + var r0 []*fftypes.FFIEvent var r1 *ffapi.FilterResult var r2 error @@ -1006,6 +1322,10 @@ func (_m *Plugin) GetFFIEvents(ctx context.Context, namespace string, filter ffa func (_m *Plugin) GetFFIMethod(ctx context.Context, namespace string, interfaceID *fftypes.UUID, pathName string) (*fftypes.FFIMethod, error) { ret := _m.Called(ctx, namespace, interfaceID, pathName) + if len(ret) == 0 { + panic("no return value specified for GetFFIMethod") + } + var r0 *fftypes.FFIMethod var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, string) (*fftypes.FFIMethod, error)); ok { @@ -1032,6 +1352,10 @@ func (_m *Plugin) GetFFIMethod(ctx context.Context, namespace string, interfaceI func (_m *Plugin) GetFFIMethods(ctx context.Context, namespace string, filter ffapi.Filter) ([]*fftypes.FFIMethod, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetFFIMethods") + } + var r0 []*fftypes.FFIMethod var r1 *ffapi.FilterResult var r2 error @@ -1067,6 +1391,10 @@ func (_m *Plugin) GetFFIMethods(ctx context.Context, namespace string, filter ff func (_m *Plugin) GetFFIs(ctx context.Context, namespace string, filter 
ffapi.Filter) ([]*fftypes.FFI, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetFFIs") + } + var r0 []*fftypes.FFI var r1 *ffapi.FilterResult var r2 error @@ -1102,6 +1430,10 @@ func (_m *Plugin) GetFFIs(ctx context.Context, namespace string, filter ffapi.Fi func (_m *Plugin) GetGroupByHash(ctx context.Context, namespace string, hash *fftypes.Bytes32) (*core.Group, error) { ret := _m.Called(ctx, namespace, hash) + if len(ret) == 0 { + panic("no return value specified for GetGroupByHash") + } + var r0 *core.Group var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.Bytes32) (*core.Group, error)); ok { @@ -1128,6 +1460,10 @@ func (_m *Plugin) GetGroupByHash(ctx context.Context, namespace string, hash *ff func (_m *Plugin) GetGroups(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Group, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetGroups") + } + var r0 []*core.Group var r1 *ffapi.FilterResult var r2 error @@ -1163,6 +1499,10 @@ func (_m *Plugin) GetGroups(ctx context.Context, namespace string, filter ffapi. func (_m *Plugin) GetIdentities(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Identity, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetIdentities") + } + var r0 []*core.Identity var r1 *ffapi.FilterResult var r2 error @@ -1198,6 +1538,10 @@ func (_m *Plugin) GetIdentities(ctx context.Context, namespace string, filter ff func (_m *Plugin) GetIdentityByDID(ctx context.Context, namespace string, did string) (*core.Identity, error) { ret := _m.Called(ctx, namespace, did) + if len(ret) == 0 { + panic("no return value specified for GetIdentityByDID") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.Identity, error)); ok { @@ -1224,6 +1568,10 @@ func (_m *Plugin) GetIdentityByDID(ctx context.Context, namespace string, did st func (_m *Plugin) GetIdentityByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.Identity, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetIdentityByID") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.Identity, error)); ok { @@ -1250,6 +1598,10 @@ func (_m *Plugin) GetIdentityByID(ctx context.Context, namespace string, id *fft func (_m *Plugin) GetIdentityByName(ctx context.Context, iType fftypes.FFEnum, namespace string, name string) (*core.Identity, error) { ret := _m.Called(ctx, iType, namespace, name) + if len(ret) == 0 { + panic("no return value specified for GetIdentityByName") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, fftypes.FFEnum, string, string) (*core.Identity, error)); ok { @@ -1276,6 +1628,10 @@ func (_m *Plugin) GetIdentityByName(ctx context.Context, iType fftypes.FFEnum, n func (_m *Plugin) GetMessageByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.Message, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetMessageByID") + } + var r0 *core.Message var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.Message, error)); ok { @@ -1302,6 +1658,10 @@ func (_m *Plugin) 
GetMessageByID(ctx context.Context, namespace string, id *ffty func (_m *Plugin) GetMessageIDs(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.IDAndSequence, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetMessageIDs") + } + var r0 []*core.IDAndSequence var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, ffapi.Filter) ([]*core.IDAndSequence, error)); ok { @@ -1328,6 +1688,10 @@ func (_m *Plugin) GetMessageIDs(ctx context.Context, namespace string, filter ff func (_m *Plugin) GetMessages(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Message, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetMessages") + } + var r0 []*core.Message var r1 *ffapi.FilterResult var r2 error @@ -1363,6 +1727,10 @@ func (_m *Plugin) GetMessages(ctx context.Context, namespace string, filter ffap func (_m *Plugin) GetMessagesForData(ctx context.Context, namespace string, dataID *fftypes.UUID, filter ffapi.Filter) ([]*core.Message, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, dataID, filter) + if len(ret) == 0 { + panic("no return value specified for GetMessagesForData") + } + var r0 []*core.Message var r1 *ffapi.FilterResult var r2 error @@ -1398,6 +1766,10 @@ func (_m *Plugin) GetMessagesForData(ctx context.Context, namespace string, data func (_m *Plugin) GetNamespace(ctx context.Context, name string) (*core.Namespace, error) { ret := _m.Called(ctx, name) + if len(ret) == 0 { + panic("no return value specified for GetNamespace") + } + var r0 *core.Namespace var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Namespace, error)); ok { @@ -1424,6 +1796,10 @@ func (_m *Plugin) GetNamespace(ctx context.Context, name string) (*core.Namespac func (_m *Plugin) GetNextPins(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.NextPin, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetNextPins") + } + var r0 []*core.NextPin var r1 *ffapi.FilterResult var r2 error @@ -1459,6 +1835,10 @@ func (_m *Plugin) GetNextPins(ctx context.Context, namespace string, filter ffap func (_m *Plugin) GetNextPinsForContext(ctx context.Context, namespace string, _a2 *fftypes.Bytes32) ([]*core.NextPin, error) { ret := _m.Called(ctx, namespace, _a2) + if len(ret) == 0 { + panic("no return value specified for GetNextPinsForContext") + } + var r0 []*core.NextPin var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.Bytes32) ([]*core.NextPin, error)); ok { @@ -1485,6 +1865,10 @@ func (_m *Plugin) GetNextPinsForContext(ctx context.Context, namespace string, _ func (_m *Plugin) GetNonce(ctx context.Context, hash *fftypes.Bytes32) (*core.Nonce, error) { ret := _m.Called(ctx, hash) + if len(ret) == 0 { + panic("no return value specified for GetNonce") + } + var r0 *core.Nonce var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Bytes32) (*core.Nonce, error)); ok { @@ -1511,6 +1895,10 @@ func (_m *Plugin) GetNonce(ctx context.Context, hash *fftypes.Bytes32) (*core.No func (_m *Plugin) GetNonces(ctx context.Context, filter ffapi.Filter) ([]*core.Nonce, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetNonces") + } + var r0 []*core.Nonce var r1 *ffapi.FilterResult var r2 error @@ -1546,6 +1934,10 @@ 
func (_m *Plugin) GetNonces(ctx context.Context, filter ffapi.Filter) ([]*core.N func (_m *Plugin) GetOffset(ctx context.Context, t fftypes.FFEnum, name string) (*core.Offset, error) { ret := _m.Called(ctx, t, name) + if len(ret) == 0 { + panic("no return value specified for GetOffset") + } + var r0 *core.Offset var r1 error if rf, ok := ret.Get(0).(func(context.Context, fftypes.FFEnum, string) (*core.Offset, error)); ok { @@ -1572,6 +1964,10 @@ func (_m *Plugin) GetOffset(ctx context.Context, t fftypes.FFEnum, name string) func (_m *Plugin) GetOffsets(ctx context.Context, filter ffapi.Filter) ([]*core.Offset, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetOffsets") + } + var r0 []*core.Offset var r1 *ffapi.FilterResult var r2 error @@ -1607,6 +2003,10 @@ func (_m *Plugin) GetOffsets(ctx context.Context, filter ffapi.Filter) ([]*core. func (_m *Plugin) GetOperationByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.Operation, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetOperationByID") + } + var r0 *core.Operation var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.Operation, error)); ok { @@ -1633,6 +2033,10 @@ func (_m *Plugin) GetOperationByID(ctx context.Context, namespace string, id *ff func (_m *Plugin) GetOperations(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Operation, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetOperations") + } + var r0 []*core.Operation var r1 *ffapi.FilterResult var r2 error @@ -1668,6 +2072,10 @@ func (_m *Plugin) GetOperations(ctx context.Context, namespace string, filter ff func (_m *Plugin) GetPins(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Pin, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetPins") + } + var r0 []*core.Pin var r1 *ffapi.FilterResult var r2 error @@ -1703,6 +2111,10 @@ func (_m *Plugin) GetPins(ctx context.Context, namespace string, filter ffapi.Fi func (_m *Plugin) GetSubscriptionByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.Subscription, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetSubscriptionByID") + } + var r0 *core.Subscription var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.Subscription, error)); ok { @@ -1729,6 +2141,10 @@ func (_m *Plugin) GetSubscriptionByID(ctx context.Context, namespace string, id func (_m *Plugin) GetSubscriptionByName(ctx context.Context, namespace string, name string) (*core.Subscription, error) { ret := _m.Called(ctx, namespace, name) + if len(ret) == 0 { + panic("no return value specified for GetSubscriptionByName") + } + var r0 *core.Subscription var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.Subscription, error)); ok { @@ -1755,6 +2171,10 @@ func (_m *Plugin) GetSubscriptionByName(ctx context.Context, namespace string, n func (_m *Plugin) GetSubscriptions(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Subscription, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetSubscriptions") + } + var r0 []*core.Subscription var r1 
*ffapi.FilterResult var r2 error @@ -1790,6 +2210,10 @@ func (_m *Plugin) GetSubscriptions(ctx context.Context, namespace string, filter func (_m *Plugin) GetTokenAccountPools(ctx context.Context, namespace string, key string, filter ffapi.Filter) ([]*core.TokenAccountPool, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, key, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenAccountPools") + } + var r0 []*core.TokenAccountPool var r1 *ffapi.FilterResult var r2 error @@ -1825,6 +2249,10 @@ func (_m *Plugin) GetTokenAccountPools(ctx context.Context, namespace string, ke func (_m *Plugin) GetTokenAccounts(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.TokenAccount, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenAccounts") + } + var r0 []*core.TokenAccount var r1 *ffapi.FilterResult var r2 error @@ -1860,6 +2288,10 @@ func (_m *Plugin) GetTokenAccounts(ctx context.Context, namespace string, filter func (_m *Plugin) GetTokenApprovalByID(ctx context.Context, namespace string, localID *fftypes.UUID) (*core.TokenApproval, error) { ret := _m.Called(ctx, namespace, localID) + if len(ret) == 0 { + panic("no return value specified for GetTokenApprovalByID") + } + var r0 *core.TokenApproval var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.TokenApproval, error)); ok { @@ -1882,25 +2314,29 @@ func (_m *Plugin) GetTokenApprovalByID(ctx context.Context, namespace string, lo return r0, r1 } -// GetTokenApprovalByProtocolID provides a mock function with given fields: ctx, namespace, connector, protocolID -func (_m *Plugin) GetTokenApprovalByProtocolID(ctx context.Context, namespace string, connector string, protocolID string) (*core.TokenApproval, error) { - ret := _m.Called(ctx, namespace, connector, protocolID) +// GetTokenApprovalByProtocolID provides a mock function with given fields: ctx, namespace, poolID, protocolID +func (_m *Plugin) GetTokenApprovalByProtocolID(ctx context.Context, namespace string, poolID *fftypes.UUID, protocolID string) (*core.TokenApproval, error) { + ret := _m.Called(ctx, namespace, poolID, protocolID) + + if len(ret) == 0 { + panic("no return value specified for GetTokenApprovalByProtocolID") + } var r0 *core.TokenApproval var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (*core.TokenApproval, error)); ok { - return rf(ctx, namespace, connector, protocolID) + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, string) (*core.TokenApproval, error)); ok { + return rf(ctx, namespace, poolID, protocolID) } - if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *core.TokenApproval); ok { - r0 = rf(ctx, namespace, connector, protocolID) + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, string) *core.TokenApproval); ok { + r0 = rf(ctx, namespace, poolID, protocolID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*core.TokenApproval) } } - if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { - r1 = rf(ctx, namespace, connector, protocolID) + if rf, ok := ret.Get(1).(func(context.Context, string, *fftypes.UUID, string) error); ok { + r1 = rf(ctx, namespace, poolID, protocolID) } else { r1 = ret.Error(1) } @@ -1912,6 +2348,10 @@ func (_m *Plugin) GetTokenApprovalByProtocolID(ctx context.Context, namespace st func (_m *Plugin) GetTokenApprovals(ctx context.Context, 
namespace string, filter ffapi.Filter) ([]*core.TokenApproval, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenApprovals") + } + var r0 []*core.TokenApproval var r1 *ffapi.FilterResult var r2 error @@ -1947,6 +2387,10 @@ func (_m *Plugin) GetTokenApprovals(ctx context.Context, namespace string, filte func (_m *Plugin) GetTokenBalance(ctx context.Context, namespace string, poolID *fftypes.UUID, tokenIndex string, identity string) (*core.TokenBalance, error) { ret := _m.Called(ctx, namespace, poolID, tokenIndex, identity) + if len(ret) == 0 { + panic("no return value specified for GetTokenBalance") + } + var r0 *core.TokenBalance var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, string, string) (*core.TokenBalance, error)); ok { @@ -1973,6 +2417,10 @@ func (_m *Plugin) GetTokenBalance(ctx context.Context, namespace string, poolID func (_m *Plugin) GetTokenBalances(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.TokenBalance, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenBalances") + } + var r0 []*core.TokenBalance var r1 *ffapi.FilterResult var r2 error @@ -2008,6 +2456,10 @@ func (_m *Plugin) GetTokenBalances(ctx context.Context, namespace string, filter func (_m *Plugin) GetTokenPool(ctx context.Context, namespace string, name string) (*core.TokenPool, error) { ret := _m.Called(ctx, namespace, name) + if len(ret) == 0 { + panic("no return value specified for GetTokenPool") + } + var r0 *core.TokenPool var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.TokenPool, error)); ok { @@ -2034,6 +2486,10 @@ func (_m *Plugin) GetTokenPool(ctx context.Context, namespace string, name strin func (_m *Plugin) GetTokenPoolByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.TokenPool, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetTokenPoolByID") + } + var r0 *core.TokenPool var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.TokenPool, error)); ok { @@ -2056,25 +2512,29 @@ func (_m *Plugin) GetTokenPoolByID(ctx context.Context, namespace string, id *ff return r0, r1 } -// GetTokenPoolByLocator provides a mock function with given fields: ctx, namespace, connector, locator -func (_m *Plugin) GetTokenPoolByLocator(ctx context.Context, namespace string, connector string, locator string) (*core.TokenPool, error) { - ret := _m.Called(ctx, namespace, connector, locator) +// GetTokenPoolByNetworkName provides a mock function with given fields: ctx, namespace, networkName +func (_m *Plugin) GetTokenPoolByNetworkName(ctx context.Context, namespace string, networkName string) (*core.TokenPool, error) { + ret := _m.Called(ctx, namespace, networkName) + + if len(ret) == 0 { + panic("no return value specified for GetTokenPoolByNetworkName") + } var r0 *core.TokenPool var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (*core.TokenPool, error)); ok { - return rf(ctx, namespace, connector, locator) + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.TokenPool, error)); ok { + return rf(ctx, namespace, networkName) } - if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *core.TokenPool); ok { - r0 = rf(ctx, namespace, connector, locator) + if rf, ok := 
ret.Get(0).(func(context.Context, string, string) *core.TokenPool); ok { + r0 = rf(ctx, namespace, networkName) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*core.TokenPool) } } - if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { - r1 = rf(ctx, namespace, connector, locator) + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, namespace, networkName) } else { r1 = ret.Error(1) } @@ -2086,6 +2546,10 @@ func (_m *Plugin) GetTokenPoolByLocator(ctx context.Context, namespace string, c func (_m *Plugin) GetTokenPools(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.TokenPool, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenPools") + } + var r0 []*core.TokenPool var r1 *ffapi.FilterResult var r2 error @@ -2121,6 +2585,10 @@ func (_m *Plugin) GetTokenPools(ctx context.Context, namespace string, filter ff func (_m *Plugin) GetTokenTransferByID(ctx context.Context, namespace string, localID *fftypes.UUID) (*core.TokenTransfer, error) { ret := _m.Called(ctx, namespace, localID) + if len(ret) == 0 { + panic("no return value specified for GetTokenTransferByID") + } + var r0 *core.TokenTransfer var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.TokenTransfer, error)); ok { @@ -2143,25 +2611,29 @@ func (_m *Plugin) GetTokenTransferByID(ctx context.Context, namespace string, lo return r0, r1 } -// GetTokenTransferByProtocolID provides a mock function with given fields: ctx, namespace, connector, protocolID -func (_m *Plugin) GetTokenTransferByProtocolID(ctx context.Context, namespace string, connector string, protocolID string) (*core.TokenTransfer, error) { - ret := _m.Called(ctx, namespace, connector, protocolID) +// GetTokenTransferByProtocolID provides a mock function with given fields: ctx, namespace, poolID, protocolID +func (_m *Plugin) GetTokenTransferByProtocolID(ctx context.Context, namespace string, poolID *fftypes.UUID, protocolID string) (*core.TokenTransfer, error) { + ret := _m.Called(ctx, namespace, poolID, protocolID) + + if len(ret) == 0 { + panic("no return value specified for GetTokenTransferByProtocolID") + } var r0 *core.TokenTransfer var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (*core.TokenTransfer, error)); ok { - return rf(ctx, namespace, connector, protocolID) + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, string) (*core.TokenTransfer, error)); ok { + return rf(ctx, namespace, poolID, protocolID) } - if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *core.TokenTransfer); ok { - r0 = rf(ctx, namespace, connector, protocolID) + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, string) *core.TokenTransfer); ok { + r0 = rf(ctx, namespace, poolID, protocolID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*core.TokenTransfer) } } - if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { - r1 = rf(ctx, namespace, connector, protocolID) + if rf, ok := ret.Get(1).(func(context.Context, string, *fftypes.UUID, string) error); ok { + r1 = rf(ctx, namespace, poolID, protocolID) } else { r1 = ret.Error(1) } @@ -2173,6 +2645,10 @@ func (_m *Plugin) GetTokenTransferByProtocolID(ctx context.Context, namespace st func (_m *Plugin) GetTokenTransfers(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.TokenTransfer, 
*ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetTokenTransfers") + } + var r0 []*core.TokenTransfer var r1 *ffapi.FilterResult var r2 error @@ -2208,6 +2684,10 @@ func (_m *Plugin) GetTokenTransfers(ctx context.Context, namespace string, filte func (_m *Plugin) GetTransactionByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.Transaction, error) { ret := _m.Called(ctx, namespace, id) + if len(ret) == 0 { + panic("no return value specified for GetTransactionByID") + } + var r0 *core.Transaction var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) (*core.Transaction, error)); ok { @@ -2234,6 +2714,10 @@ func (_m *Plugin) GetTransactionByID(ctx context.Context, namespace string, id * func (_m *Plugin) GetTransactions(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Transaction, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetTransactions") + } + var r0 []*core.Transaction var r1 *ffapi.FilterResult var r2 error @@ -2269,6 +2753,10 @@ func (_m *Plugin) GetTransactions(ctx context.Context, namespace string, filter func (_m *Plugin) GetVerifierByHash(ctx context.Context, namespace string, hash *fftypes.Bytes32) (*core.Verifier, error) { ret := _m.Called(ctx, namespace, hash) + if len(ret) == 0 { + panic("no return value specified for GetVerifierByHash") + } + var r0 *core.Verifier var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.Bytes32) (*core.Verifier, error)); ok { @@ -2295,6 +2783,10 @@ func (_m *Plugin) GetVerifierByHash(ctx context.Context, namespace string, hash func (_m *Plugin) GetVerifierByValue(ctx context.Context, vType fftypes.FFEnum, namespace string, value string) (*core.Verifier, error) { ret := _m.Called(ctx, vType, namespace, value) + if len(ret) == 0 { + panic("no return value specified for GetVerifierByValue") + } + var r0 *core.Verifier var r1 error if rf, ok := ret.Get(0).(func(context.Context, fftypes.FFEnum, string, string) (*core.Verifier, error)); ok { @@ -2321,6 +2813,10 @@ func (_m *Plugin) GetVerifierByValue(ctx context.Context, vType fftypes.FFEnum, func (_m *Plugin) GetVerifiers(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.Verifier, *ffapi.FilterResult, error) { ret := _m.Called(ctx, namespace, filter) + if len(ret) == 0 { + panic("no return value specified for GetVerifiers") + } + var r0 []*core.Verifier var r1 *ffapi.FilterResult var r2 error @@ -2356,6 +2852,10 @@ func (_m *Plugin) GetVerifiers(ctx context.Context, namespace string, filter ffa func (_m *Plugin) Init(ctx context.Context, _a1 config.Section) error { ret := _m.Called(ctx, _a1) + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, config.Section) error); ok { r0 = rf(ctx, _a1) @@ -2375,6 +2875,10 @@ func (_m *Plugin) InitConfig(_a0 config.Section) { func (_m *Plugin) InsertBlob(ctx context.Context, blob *core.Blob) error { ret := _m.Called(ctx, blob) + if len(ret) == 0 { + panic("no return value specified for InsertBlob") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Blob) error); ok { r0 = rf(ctx, blob) @@ -2389,6 +2893,10 @@ func (_m *Plugin) InsertBlob(ctx context.Context, blob *core.Blob) error { func (_m *Plugin) InsertBlobs(ctx context.Context, blobs []*core.Blob) error { ret := _m.Called(ctx, blobs) + if 
len(ret) == 0 { + panic("no return value specified for InsertBlobs") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, []*core.Blob) error); ok { r0 = rf(ctx, blobs) @@ -2399,10 +2907,39 @@ func (_m *Plugin) InsertBlobs(ctx context.Context, blobs []*core.Blob) error { return r0 } +// InsertBlockchainEvents provides a mock function with given fields: ctx, messages, hooks +func (_m *Plugin) InsertBlockchainEvents(ctx context.Context, messages []*core.BlockchainEvent, hooks ...database.PostCompletionHook) error { + _va := make([]interface{}, len(hooks)) + for _i := range hooks { + _va[_i] = hooks[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, messages) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for InsertBlockchainEvents") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []*core.BlockchainEvent, ...database.PostCompletionHook) error); ok { + r0 = rf(ctx, messages, hooks...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // InsertContractListener provides a mock function with given fields: ctx, sub func (_m *Plugin) InsertContractListener(ctx context.Context, sub *core.ContractListener) error { ret := _m.Called(ctx, sub) + if len(ret) == 0 { + panic("no return value specified for InsertContractListener") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.ContractListener) error); ok { r0 = rf(ctx, sub) @@ -2417,6 +2954,10 @@ func (_m *Plugin) InsertContractListener(ctx context.Context, sub *core.Contract func (_m *Plugin) InsertDataArray(ctx context.Context, data core.DataArray) error { ret := _m.Called(ctx, data) + if len(ret) == 0 { + panic("no return value specified for InsertDataArray") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, core.DataArray) error); ok { r0 = rf(ctx, data) @@ -2431,6 +2972,10 @@ func (_m *Plugin) InsertDataArray(ctx context.Context, data core.DataArray) erro func (_m *Plugin) InsertEvent(ctx context.Context, data *core.Event) error { ret := _m.Called(ctx, data) + if len(ret) == 0 { + panic("no return value specified for InsertEvent") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Event) error); ok { r0 = rf(ctx, data) @@ -2452,6 +2997,10 @@ func (_m *Plugin) InsertMessages(ctx context.Context, messages []*core.Message, _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for InsertMessages") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, []*core.Message, ...database.PostCompletionHook) error); ok { r0 = rf(ctx, messages, hooks...) 
@@ -2466,6 +3015,10 @@ func (_m *Plugin) InsertMessages(ctx context.Context, messages []*core.Message, func (_m *Plugin) InsertNextPin(ctx context.Context, nextpin *core.NextPin) error { ret := _m.Called(ctx, nextpin) + if len(ret) == 0 { + panic("no return value specified for InsertNextPin") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.NextPin) error); ok { r0 = rf(ctx, nextpin) @@ -2480,6 +3033,10 @@ func (_m *Plugin) InsertNextPin(ctx context.Context, nextpin *core.NextPin) erro func (_m *Plugin) InsertNonce(ctx context.Context, nonce *core.Nonce) error { ret := _m.Called(ctx, nonce) + if len(ret) == 0 { + panic("no return value specified for InsertNonce") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Nonce) error); ok { r0 = rf(ctx, nonce) @@ -2501,6 +3058,10 @@ func (_m *Plugin) InsertOperation(ctx context.Context, operation *core.Operation _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for InsertOperation") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Operation, ...database.PostCompletionHook) error); ok { r0 = rf(ctx, operation, hooks...) @@ -2511,10 +3072,39 @@ func (_m *Plugin) InsertOperation(ctx context.Context, operation *core.Operation return r0 } +// InsertOperations provides a mock function with given fields: ctx, ops, hooks +func (_m *Plugin) InsertOperations(ctx context.Context, ops []*core.Operation, hooks ...database.PostCompletionHook) error { + _va := make([]interface{}, len(hooks)) + for _i := range hooks { + _va[_i] = hooks[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, ops) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for InsertOperations") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []*core.Operation, ...database.PostCompletionHook) error); ok { + r0 = rf(ctx, ops, hooks...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + // InsertOrGetBatch provides a mock function with given fields: ctx, data func (_m *Plugin) InsertOrGetBatch(ctx context.Context, data *core.BatchPersisted) (*core.BatchPersisted, error) { ret := _m.Called(ctx, data) + if len(ret) == 0 { + panic("no return value specified for InsertOrGetBatch") + } + var r0 *core.BatchPersisted var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.BatchPersisted) (*core.BatchPersisted, error)); ok { @@ -2541,6 +3131,10 @@ func (_m *Plugin) InsertOrGetBatch(ctx context.Context, data *core.BatchPersiste func (_m *Plugin) InsertOrGetBlockchainEvent(ctx context.Context, event *core.BlockchainEvent) (*core.BlockchainEvent, error) { ret := _m.Called(ctx, event) + if len(ret) == 0 { + panic("no return value specified for InsertOrGetBlockchainEvent") + } + var r0 *core.BlockchainEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.BlockchainEvent) (*core.BlockchainEvent, error)); ok { @@ -2563,10 +3157,134 @@ func (_m *Plugin) InsertOrGetBlockchainEvent(ctx context.Context, event *core.Bl return r0, r1 } +// InsertOrGetContractAPI provides a mock function with given fields: ctx, api +func (_m *Plugin) InsertOrGetContractAPI(ctx context.Context, api *core.ContractAPI) (*core.ContractAPI, error) { + ret := _m.Called(ctx, api) + + if len(ret) == 0 { + panic("no return value specified for InsertOrGetContractAPI") + } + + var r0 *core.ContractAPI + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *core.ContractAPI) (*core.ContractAPI, error)); ok { + return rf(ctx, api) + } + if rf, ok := ret.Get(0).(func(context.Context, *core.ContractAPI) *core.ContractAPI); ok { + r0 = rf(ctx, api) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.ContractAPI) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *core.ContractAPI) error); ok { + r1 = rf(ctx, api) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InsertOrGetFFI provides a mock function with given fields: ctx, ffi +func (_m *Plugin) InsertOrGetFFI(ctx context.Context, ffi *fftypes.FFI) (*fftypes.FFI, error) { + ret := _m.Called(ctx, ffi) + + if len(ret) == 0 { + panic("no return value specified for InsertOrGetFFI") + } + + var r0 *fftypes.FFI + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFI) (*fftypes.FFI, error)); ok { + return rf(ctx, ffi) + } + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFI) *fftypes.FFI); ok { + r0 = rf(ctx, ffi) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.FFI) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.FFI) error); ok { + r1 = rf(ctx, ffi) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InsertOrGetTokenPool provides a mock function with given fields: ctx, pool +func (_m *Plugin) InsertOrGetTokenPool(ctx context.Context, pool *core.TokenPool) (*core.TokenPool, error) { + ret := _m.Called(ctx, pool) + + if len(ret) == 0 { + panic("no return value specified for InsertOrGetTokenPool") + } + + var r0 *core.TokenPool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool) (*core.TokenPool, error)); ok { + return rf(ctx, pool) + } + if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool) *core.TokenPool); ok { + r0 = rf(ctx, pool) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.TokenPool) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *core.TokenPool) error); ok { + r1 = rf(ctx, pool) + } else { + r1 = ret.Error(1) + } + + 
return r0, r1 +} + +// InsertOrGetTokenTransfer provides a mock function with given fields: ctx, approval +func (_m *Plugin) InsertOrGetTokenTransfer(ctx context.Context, approval *core.TokenTransfer) (*core.TokenTransfer, error) { + ret := _m.Called(ctx, approval) + + if len(ret) == 0 { + panic("no return value specified for InsertOrGetTokenTransfer") + } + + var r0 *core.TokenTransfer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *core.TokenTransfer) (*core.TokenTransfer, error)); ok { + return rf(ctx, approval) + } + if rf, ok := ret.Get(0).(func(context.Context, *core.TokenTransfer) *core.TokenTransfer); ok { + r0 = rf(ctx, approval) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.TokenTransfer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *core.TokenTransfer) error); ok { + r1 = rf(ctx, approval) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // InsertPins provides a mock function with given fields: ctx, pins func (_m *Plugin) InsertPins(ctx context.Context, pins []*core.Pin) error { ret := _m.Called(ctx, pins) + if len(ret) == 0 { + panic("no return value specified for InsertPins") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, []*core.Pin) error); ok { r0 = rf(ctx, pins) @@ -2577,13 +3295,35 @@ func (_m *Plugin) InsertPins(ctx context.Context, pins []*core.Pin) error { return r0 } -// InsertTransaction provides a mock function with given fields: ctx, data -func (_m *Plugin) InsertTransaction(ctx context.Context, data *core.Transaction) error { - ret := _m.Called(ctx, data) +// InsertTransaction provides a mock function with given fields: ctx, txn +func (_m *Plugin) InsertTransaction(ctx context.Context, txn *core.Transaction) error { + ret := _m.Called(ctx, txn) + + if len(ret) == 0 { + panic("no return value specified for InsertTransaction") + } var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Transaction) error); ok { - r0 = rf(ctx, data) + r0 = rf(ctx, txn) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertTransactions provides a mock function with given fields: ctx, txns +func (_m *Plugin) InsertTransactions(ctx context.Context, txns []*core.Transaction) error { + ret := _m.Called(ctx, txns) + + if len(ret) == 0 { + panic("no return value specified for InsertTransactions") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []*core.Transaction) error); ok { + r0 = rf(ctx, txns) } else { r0 = ret.Error(0) } @@ -2595,6 +3335,10 @@ func (_m *Plugin) InsertTransaction(ctx context.Context, data *core.Transaction) func (_m *Plugin) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -2609,6 +3353,10 @@ func (_m *Plugin) Name() string { func (_m *Plugin) ReplaceMessage(ctx context.Context, message *core.Message) error { ret := _m.Called(ctx, message) + if len(ret) == 0 { + panic("no return value specified for ReplaceMessage") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Message) error); ok { r0 = rf(ctx, message) @@ -2623,6 +3371,10 @@ func (_m *Plugin) ReplaceMessage(ctx context.Context, message *core.Message) err func (_m *Plugin) RunAsGroup(ctx context.Context, fn func(context.Context) error) error { ret := _m.Called(ctx, fn) + if len(ret) == 0 { + panic("no return value specified for RunAsGroup") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, func(context.Context) error) error); ok { r0 = 
rf(ctx, fn) @@ -2642,6 +3394,10 @@ func (_m *Plugin) SetHandler(namespace string, handler database.Callbacks) { func (_m *Plugin) UpdateBatch(ctx context.Context, namespace string, id *fftypes.UUID, update ffapi.Update) error { ret := _m.Called(ctx, namespace, id, update) + if len(ret) == 0 { + panic("no return value specified for UpdateBatch") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, ffapi.Update) error); ok { r0 = rf(ctx, namespace, id, update) @@ -2656,6 +3412,10 @@ func (_m *Plugin) UpdateBatch(ctx context.Context, namespace string, id *fftypes func (_m *Plugin) UpdateContractListener(ctx context.Context, namespace string, id *fftypes.UUID, update ffapi.Update) error { ret := _m.Called(ctx, namespace, id, update) + if len(ret) == 0 { + panic("no return value specified for UpdateContractListener") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, ffapi.Update) error); ok { r0 = rf(ctx, namespace, id, update) @@ -2670,6 +3430,10 @@ func (_m *Plugin) UpdateContractListener(ctx context.Context, namespace string, func (_m *Plugin) UpdateData(ctx context.Context, namespace string, id *fftypes.UUID, update ffapi.Update) error { ret := _m.Called(ctx, namespace, id, update) + if len(ret) == 0 { + panic("no return value specified for UpdateData") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, ffapi.Update) error); ok { r0 = rf(ctx, namespace, id, update) @@ -2684,6 +3448,10 @@ func (_m *Plugin) UpdateData(ctx context.Context, namespace string, id *fftypes. func (_m *Plugin) UpdateMessage(ctx context.Context, namespace string, id *fftypes.UUID, update ffapi.Update) error { ret := _m.Called(ctx, namespace, id, update) + if len(ret) == 0 { + panic("no return value specified for UpdateMessage") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, ffapi.Update) error); ok { r0 = rf(ctx, namespace, id, update) @@ -2698,6 +3466,10 @@ func (_m *Plugin) UpdateMessage(ctx context.Context, namespace string, id *fftyp func (_m *Plugin) UpdateMessages(ctx context.Context, namespace string, filter ffapi.Filter, update ffapi.Update) error { ret := _m.Called(ctx, namespace, filter, update) + if len(ret) == 0 { + panic("no return value specified for UpdateMessages") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, ffapi.Filter, ffapi.Update) error); ok { r0 = rf(ctx, namespace, filter, update) @@ -2712,6 +3484,10 @@ func (_m *Plugin) UpdateMessages(ctx context.Context, namespace string, filter f func (_m *Plugin) UpdateNextPin(ctx context.Context, namespace string, sequence int64, update ffapi.Update) error { ret := _m.Called(ctx, namespace, sequence, update) + if len(ret) == 0 { + panic("no return value specified for UpdateNextPin") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, int64, ffapi.Update) error); ok { r0 = rf(ctx, namespace, sequence, update) @@ -2726,6 +3502,10 @@ func (_m *Plugin) UpdateNextPin(ctx context.Context, namespace string, sequence func (_m *Plugin) UpdateNonce(ctx context.Context, nonce *core.Nonce) error { ret := _m.Called(ctx, nonce) + if len(ret) == 0 { + panic("no return value specified for UpdateNonce") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Nonce) error); ok { r0 = rf(ctx, nonce) @@ -2740,6 +3520,10 @@ func (_m *Plugin) UpdateNonce(ctx context.Context, nonce *core.Nonce) error { func (_m *Plugin) UpdateOffset(ctx context.Context, rowID 
int64, update ffapi.Update) error { ret := _m.Called(ctx, rowID, update) + if len(ret) == 0 { + panic("no return value specified for UpdateOffset") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, int64, ffapi.Update) error); ok { r0 = rf(ctx, rowID, update) @@ -2754,6 +3538,10 @@ func (_m *Plugin) UpdateOffset(ctx context.Context, rowID int64, update ffapi.Up func (_m *Plugin) UpdateOperation(ctx context.Context, namespace string, id *fftypes.UUID, filter ffapi.Filter, update ffapi.Update) (bool, error) { ret := _m.Called(ctx, namespace, id, filter, update) + if len(ret) == 0 { + panic("no return value specified for UpdateOperation") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, ffapi.Filter, ffapi.Update) (bool, error)); ok { @@ -2778,6 +3566,10 @@ func (_m *Plugin) UpdateOperation(ctx context.Context, namespace string, id *fft func (_m *Plugin) UpdatePins(ctx context.Context, namespace string, filter ffapi.Filter, update ffapi.Update) error { ret := _m.Called(ctx, namespace, filter, update) + if len(ret) == 0 { + panic("no return value specified for UpdatePins") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, ffapi.Filter, ffapi.Update) error); ok { r0 = rf(ctx, namespace, filter, update) @@ -2792,6 +3584,10 @@ func (_m *Plugin) UpdatePins(ctx context.Context, namespace string, filter ffapi func (_m *Plugin) UpdateSubscription(ctx context.Context, namespace string, name string, update ffapi.Update) error { ret := _m.Called(ctx, namespace, name, update) + if len(ret) == 0 { + panic("no return value specified for UpdateSubscription") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, ffapi.Update) error); ok { r0 = rf(ctx, namespace, name, update) @@ -2806,6 +3602,10 @@ func (_m *Plugin) UpdateSubscription(ctx context.Context, namespace string, name func (_m *Plugin) UpdateTokenApprovals(ctx context.Context, filter ffapi.Filter, update ffapi.Update) error { ret := _m.Called(ctx, filter, update) + if len(ret) == 0 { + panic("no return value specified for UpdateTokenApprovals") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, ffapi.Filter, ffapi.Update) error); ok { r0 = rf(ctx, filter, update) @@ -2820,6 +3620,10 @@ func (_m *Plugin) UpdateTokenApprovals(ctx context.Context, filter ffapi.Filter, func (_m *Plugin) UpdateTokenBalances(ctx context.Context, transfer *core.TokenTransfer) error { ret := _m.Called(ctx, transfer) + if len(ret) == 0 { + panic("no return value specified for UpdateTokenBalances") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenTransfer) error); ok { r0 = rf(ctx, transfer) @@ -2834,6 +3638,10 @@ func (_m *Plugin) UpdateTokenBalances(ctx context.Context, transfer *core.TokenT func (_m *Plugin) UpdateTransaction(ctx context.Context, namespace string, id *fftypes.UUID, update ffapi.Update) error { ret := _m.Called(ctx, namespace, id, update) + if len(ret) == 0 { + panic("no return value specified for UpdateTransaction") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, ffapi.Update) error); ok { r0 = rf(ctx, namespace, id, update) @@ -2844,13 +3652,17 @@ func (_m *Plugin) UpdateTransaction(ctx context.Context, namespace string, id *f return r0 } -// UpsertContractAPI provides a mock function with given fields: ctx, cd -func (_m *Plugin) UpsertContractAPI(ctx context.Context, cd *core.ContractAPI) error { - ret := _m.Called(ctx, cd) +// UpsertContractAPI 
provides a mock function with given fields: ctx, api, optimization +func (_m *Plugin) UpsertContractAPI(ctx context.Context, api *core.ContractAPI, optimization database.UpsertOptimization) error { + ret := _m.Called(ctx, api, optimization) + + if len(ret) == 0 { + panic("no return value specified for UpsertContractAPI") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *core.ContractAPI) error); ok { - r0 = rf(ctx, cd) + if rf, ok := ret.Get(0).(func(context.Context, *core.ContractAPI, database.UpsertOptimization) error); ok { + r0 = rf(ctx, api, optimization) } else { r0 = ret.Error(0) } @@ -2862,6 +3674,10 @@ func (_m *Plugin) UpsertContractAPI(ctx context.Context, cd *core.ContractAPI) e func (_m *Plugin) UpsertData(ctx context.Context, data *core.Data, optimization database.UpsertOptimization) error { ret := _m.Called(ctx, data, optimization) + if len(ret) == 0 { + panic("no return value specified for UpsertData") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Data, database.UpsertOptimization) error); ok { r0 = rf(ctx, data, optimization) @@ -2876,6 +3692,10 @@ func (_m *Plugin) UpsertData(ctx context.Context, data *core.Data, optimization func (_m *Plugin) UpsertDatatype(ctx context.Context, datadef *core.Datatype, allowExisting bool) error { ret := _m.Called(ctx, datadef, allowExisting) + if len(ret) == 0 { + panic("no return value specified for UpsertDatatype") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Datatype, bool) error); ok { r0 = rf(ctx, datadef, allowExisting) @@ -2886,13 +3706,17 @@ func (_m *Plugin) UpsertDatatype(ctx context.Context, datadef *core.Datatype, al return r0 } -// UpsertFFI provides a mock function with given fields: ctx, cd -func (_m *Plugin) UpsertFFI(ctx context.Context, cd *fftypes.FFI) error { - ret := _m.Called(ctx, cd) +// UpsertFFI provides a mock function with given fields: ctx, ffi, optimization +func (_m *Plugin) UpsertFFI(ctx context.Context, ffi *fftypes.FFI, optimization database.UpsertOptimization) error { + ret := _m.Called(ctx, ffi, optimization) + + if len(ret) == 0 { + panic("no return value specified for UpsertFFI") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFI) error); ok { - r0 = rf(ctx, cd) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFI, database.UpsertOptimization) error); ok { + r0 = rf(ctx, ffi, optimization) } else { r0 = ret.Error(0) } @@ -2904,6 +3728,10 @@ func (_m *Plugin) UpsertFFI(ctx context.Context, cd *fftypes.FFI) error { func (_m *Plugin) UpsertFFIError(ctx context.Context, method *fftypes.FFIError) error { ret := _m.Called(ctx, method) + if len(ret) == 0 { + panic("no return value specified for UpsertFFIError") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIError) error); ok { r0 = rf(ctx, method) @@ -2918,6 +3746,10 @@ func (_m *Plugin) UpsertFFIError(ctx context.Context, method *fftypes.FFIError) func (_m *Plugin) UpsertFFIEvent(ctx context.Context, method *fftypes.FFIEvent) error { ret := _m.Called(ctx, method) + if len(ret) == 0 { + panic("no return value specified for UpsertFFIEvent") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIEvent) error); ok { r0 = rf(ctx, method) @@ -2932,6 +3764,10 @@ func (_m *Plugin) UpsertFFIEvent(ctx context.Context, method *fftypes.FFIEvent) func (_m *Plugin) UpsertFFIMethod(ctx context.Context, method *fftypes.FFIMethod) error { ret := _m.Called(ctx, method) + if len(ret) == 0 { + panic("no return value 
specified for UpsertFFIMethod") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFIMethod) error); ok { r0 = rf(ctx, method) @@ -2946,6 +3782,10 @@ func (_m *Plugin) UpsertFFIMethod(ctx context.Context, method *fftypes.FFIMethod func (_m *Plugin) UpsertGroup(ctx context.Context, data *core.Group, optimization database.UpsertOptimization) error { ret := _m.Called(ctx, data, optimization) + if len(ret) == 0 { + panic("no return value specified for UpsertGroup") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Group, database.UpsertOptimization) error); ok { r0 = rf(ctx, data, optimization) @@ -2960,6 +3800,10 @@ func (_m *Plugin) UpsertGroup(ctx context.Context, data *core.Group, optimizatio func (_m *Plugin) UpsertIdentity(ctx context.Context, data *core.Identity, optimization database.UpsertOptimization) error { ret := _m.Called(ctx, data, optimization) + if len(ret) == 0 { + panic("no return value specified for UpsertIdentity") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Identity, database.UpsertOptimization) error); ok { r0 = rf(ctx, data, optimization) @@ -2981,6 +3825,10 @@ func (_m *Plugin) UpsertMessage(ctx context.Context, message *core.Message, opti _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for UpsertMessage") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Message, database.UpsertOptimization, ...database.PostCompletionHook) error); ok { r0 = rf(ctx, message, optimization, hooks...) @@ -2995,6 +3843,10 @@ func (_m *Plugin) UpsertMessage(ctx context.Context, message *core.Message, opti func (_m *Plugin) UpsertNamespace(ctx context.Context, data *core.Namespace, allowExisting bool) error { ret := _m.Called(ctx, data, allowExisting) + if len(ret) == 0 { + panic("no return value specified for UpsertNamespace") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Namespace, bool) error); ok { r0 = rf(ctx, data, allowExisting) @@ -3009,6 +3861,10 @@ func (_m *Plugin) UpsertNamespace(ctx context.Context, data *core.Namespace, all func (_m *Plugin) UpsertOffset(ctx context.Context, data *core.Offset, allowExisting bool) error { ret := _m.Called(ctx, data, allowExisting) + if len(ret) == 0 { + panic("no return value specified for UpsertOffset") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Offset, bool) error); ok { r0 = rf(ctx, data, allowExisting) @@ -3023,6 +3879,10 @@ func (_m *Plugin) UpsertOffset(ctx context.Context, data *core.Offset, allowExis func (_m *Plugin) UpsertPin(ctx context.Context, parked *core.Pin) error { ret := _m.Called(ctx, parked) + if len(ret) == 0 { + panic("no return value specified for UpsertPin") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Pin) error); ok { r0 = rf(ctx, parked) @@ -3037,6 +3897,10 @@ func (_m *Plugin) UpsertPin(ctx context.Context, parked *core.Pin) error { func (_m *Plugin) UpsertSubscription(ctx context.Context, data *core.Subscription, allowExisting bool) error { ret := _m.Called(ctx, data, allowExisting) + if len(ret) == 0 { + panic("no return value specified for UpsertSubscription") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Subscription, bool) error); ok { r0 = rf(ctx, data, allowExisting) @@ -3051,6 +3915,10 @@ func (_m *Plugin) UpsertSubscription(ctx context.Context, data *core.Subscriptio func (_m *Plugin) UpsertTokenApproval(ctx context.Context, approval 
*core.TokenApproval) error { ret := _m.Called(ctx, approval) + if len(ret) == 0 { + panic("no return value specified for UpsertTokenApproval") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenApproval) error); ok { r0 = rf(ctx, approval) @@ -3061,27 +3929,17 @@ func (_m *Plugin) UpsertTokenApproval(ctx context.Context, approval *core.TokenA return r0 } -// UpsertTokenPool provides a mock function with given fields: ctx, pool -func (_m *Plugin) UpsertTokenPool(ctx context.Context, pool *core.TokenPool) error { - ret := _m.Called(ctx, pool) +// UpsertTokenPool provides a mock function with given fields: ctx, pool, optimization +func (_m *Plugin) UpsertTokenPool(ctx context.Context, pool *core.TokenPool, optimization database.UpsertOptimization) error { + ret := _m.Called(ctx, pool, optimization) - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool) error); ok { - r0 = rf(ctx, pool) - } else { - r0 = ret.Error(0) + if len(ret) == 0 { + panic("no return value specified for UpsertTokenPool") } - return r0 -} - -// UpsertTokenTransfer provides a mock function with given fields: ctx, transfer -func (_m *Plugin) UpsertTokenTransfer(ctx context.Context, transfer *core.TokenTransfer) error { - ret := _m.Called(ctx, transfer) - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *core.TokenTransfer) error); ok { - r0 = rf(ctx, transfer) + if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool, database.UpsertOptimization) error); ok { + r0 = rf(ctx, pool, optimization) } else { r0 = ret.Error(0) } @@ -3093,6 +3951,10 @@ func (_m *Plugin) UpsertTokenTransfer(ctx context.Context, transfer *core.TokenT func (_m *Plugin) UpsertVerifier(ctx context.Context, data *core.Verifier, optimization database.UpsertOptimization) error { ret := _m.Called(ctx, data, optimization) + if len(ret) == 0 { + panic("no return value specified for UpsertVerifier") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Verifier, database.UpsertOptimization) error); ok { r0 = rf(ctx, data, optimization) @@ -3103,13 +3965,12 @@ func (_m *Plugin) UpsertVerifier(ctx context.Context, data *core.Verifier, optim return r0 } -type mockConstructorTestingTNewPlugin interface { +// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPlugin(t interface { mock.TestingT Cleanup(func()) -} - -// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPlugin(t mockConstructorTestingTNewPlugin) *Plugin { +}) *Plugin { mock := &Plugin{} mock.Mock.Test(t) diff --git a/mocks/dataexchangemocks/callbacks.go b/mocks/dataexchangemocks/callbacks.go index e39bf49807..2d11b72f6d 100644 --- a/mocks/dataexchangemocks/callbacks.go +++ b/mocks/dataexchangemocks/callbacks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
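Note on the regenerated mocks above: mockery v2.38.0 adds a len(ret) == 0 guard to every mocked method, so a matched expectation that never had Return(...) configured now panics with "no return value specified for <Method>" instead of silently yielding zero values as under v2.20.2. A minimal sketch of a test that satisfies the new guard follows; it is illustrative only, and the import paths are assumed from this repository's layout rather than taken from the diff.

package sketch

import (
	"context"
	"testing"

	"github.com/hyperledger/firefly/mocks/databasemocks"
	"github.com/hyperledger/firefly/pkg/core"
	"github.com/stretchr/testify/mock"
)

// TestInsertEventReturnConfigured shows the expectation shape the new guard
// requires: without the Return(nil) below, calling InsertEvent would panic
// with "no return value specified for InsertEvent".
func TestInsertEventReturnConfigured(t *testing.T) {
	mdb := databasemocks.NewPlugin(t) // constructor registers a Cleanup that asserts expectations

	mdb.On("InsertEvent", mock.Anything, mock.AnythingOfType("*core.Event")).Return(nil)

	if err := mdb.InsertEvent(context.Background(), &core.Event{}); err != nil {
		t.Fatal(err)
	}
}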
package dataexchangemocks @@ -16,6 +16,10 @@ type Callbacks struct { func (_m *Callbacks) DXEvent(plugin dataexchange.Plugin, event dataexchange.DXEvent) error { ret := _m.Called(plugin, event) + if len(ret) == 0 { + panic("no return value specified for DXEvent") + } + var r0 error if rf, ok := ret.Get(0).(func(dataexchange.Plugin, dataexchange.DXEvent) error); ok { r0 = rf(plugin, event) @@ -26,13 +30,12 @@ func (_m *Callbacks) DXEvent(plugin dataexchange.Plugin, event dataexchange.DXEv return r0 } -type mockConstructorTestingTNewCallbacks interface { +// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCallbacks(t interface { mock.TestingT Cleanup(func()) -} - -// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCallbacks(t mockConstructorTestingTNewCallbacks) *Callbacks { +}) *Callbacks { mock := &Callbacks{} mock.Mock.Test(t) diff --git a/mocks/dataexchangemocks/dx_event.go b/mocks/dataexchangemocks/dx_event.go index e378893b49..200fe5e71e 100644 --- a/mocks/dataexchangemocks/dx_event.go +++ b/mocks/dataexchangemocks/dx_event.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package dataexchangemocks @@ -26,6 +26,10 @@ func (_m *DXEvent) AckWithManifest(manifest string) { func (_m *DXEvent) EventID() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for EventID") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -40,6 +44,10 @@ func (_m *DXEvent) EventID() string { func (_m *DXEvent) MessageReceived() *dataexchange.MessageReceived { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for MessageReceived") + } + var r0 *dataexchange.MessageReceived if rf, ok := ret.Get(0).(func() *dataexchange.MessageReceived); ok { r0 = rf() @@ -56,6 +64,10 @@ func (_m *DXEvent) MessageReceived() *dataexchange.MessageReceived { func (_m *DXEvent) PrivateBlobReceived() *dataexchange.PrivateBlobReceived { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for PrivateBlobReceived") + } + var r0 *dataexchange.PrivateBlobReceived if rf, ok := ret.Get(0).(func() *dataexchange.PrivateBlobReceived); ok { r0 = rf() @@ -72,6 +84,10 @@ func (_m *DXEvent) PrivateBlobReceived() *dataexchange.PrivateBlobReceived { func (_m *DXEvent) Type() dataexchange.DXEventType { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Type") + } + var r0 dataexchange.DXEventType if rf, ok := ret.Get(0).(func() dataexchange.DXEventType); ok { r0 = rf() @@ -82,13 +98,12 @@ func (_m *DXEvent) Type() dataexchange.DXEventType { return r0 } -type mockConstructorTestingTNewDXEvent interface { +// NewDXEvent creates a new instance of DXEvent. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDXEvent(t interface { mock.TestingT Cleanup(func()) -} - -// NewDXEvent creates a new instance of DXEvent. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewDXEvent(t mockConstructorTestingTNewDXEvent) *DXEvent { +}) *DXEvent { mock := &DXEvent{} mock.Mock.Test(t) diff --git a/mocks/dataexchangemocks/plugin.go b/mocks/dataexchangemocks/plugin.go index e9abd6b42b..e0e932143f 100644 --- a/mocks/dataexchangemocks/plugin.go +++ b/mocks/dataexchangemocks/plugin.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package dataexchangemocks @@ -27,6 +27,10 @@ type Plugin struct { func (_m *Plugin) AddNode(ctx context.Context, networkNamespace string, nodeName string, peer fftypes.JSONObject) error { ret := _m.Called(ctx, networkNamespace, nodeName, peer) + if len(ret) == 0 { + panic("no return value specified for AddNode") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, fftypes.JSONObject) error); ok { r0 = rf(ctx, networkNamespace, nodeName, peer) @@ -41,6 +45,10 @@ func (_m *Plugin) AddNode(ctx context.Context, networkNamespace string, nodeName func (_m *Plugin) Capabilities() *dataexchange.Capabilities { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Capabilities") + } + var r0 *dataexchange.Capabilities if rf, ok := ret.Get(0).(func() *dataexchange.Capabilities); ok { r0 = rf() @@ -57,6 +65,10 @@ func (_m *Plugin) Capabilities() *dataexchange.Capabilities { func (_m *Plugin) DeleteBlob(ctx context.Context, payloadRef string) error { ret := _m.Called(ctx, payloadRef) + if len(ret) == 0 { + panic("no return value specified for DeleteBlob") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, payloadRef) @@ -71,6 +83,10 @@ func (_m *Plugin) DeleteBlob(ctx context.Context, payloadRef string) error { func (_m *Plugin) DownloadBlob(ctx context.Context, payloadRef string) (io.ReadCloser, error) { ret := _m.Called(ctx, payloadRef) + if len(ret) == 0 { + panic("no return value specified for DownloadBlob") + } + var r0 io.ReadCloser var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (io.ReadCloser, error)); ok { @@ -97,6 +113,10 @@ func (_m *Plugin) DownloadBlob(ctx context.Context, payloadRef string) (io.ReadC func (_m *Plugin) GetEndpointInfo(ctx context.Context, nodeName string) (fftypes.JSONObject, error) { ret := _m.Called(ctx, nodeName) + if len(ret) == 0 { + panic("no return value specified for GetEndpointInfo") + } + var r0 fftypes.JSONObject var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (fftypes.JSONObject, error)); ok { @@ -123,6 +143,10 @@ func (_m *Plugin) GetEndpointInfo(ctx context.Context, nodeName string) (fftypes func (_m *Plugin) GetPeerID(peer fftypes.JSONObject) string { ret := _m.Called(peer) + if len(ret) == 0 { + panic("no return value specified for GetPeerID") + } + var r0 string if rf, ok := ret.Get(0).(func(fftypes.JSONObject) string); ok { r0 = rf(peer) @@ -137,6 +161,10 @@ func (_m *Plugin) GetPeerID(peer fftypes.JSONObject) string { func (_m *Plugin) Init(ctx context.Context, cancelCtx context.CancelFunc, _a2 config.Section) error { ret := _m.Called(ctx, cancelCtx, _a2) + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, context.CancelFunc, config.Section) error); ok { r0 = rf(ctx, cancelCtx, _a2) @@ -156,6 +184,10 @@ func (_m *Plugin) InitConfig(_a0 config.Section) { func (_m *Plugin) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := 
ret.Get(0).(func() string); ok { r0 = rf() @@ -170,6 +202,10 @@ func (_m *Plugin) Name() string { func (_m *Plugin) SendMessage(ctx context.Context, nsOpID string, peer fftypes.JSONObject, sender fftypes.JSONObject, data []byte) error { ret := _m.Called(ctx, nsOpID, peer, sender, data) + if len(ret) == 0 { + panic("no return value specified for SendMessage") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, fftypes.JSONObject, fftypes.JSONObject, []byte) error); ok { r0 = rf(ctx, nsOpID, peer, sender, data) @@ -194,6 +230,10 @@ func (_m *Plugin) SetOperationHandler(namespace string, handler core.OperationCa func (_m *Plugin) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -208,6 +248,10 @@ func (_m *Plugin) Start() error { func (_m *Plugin) TransferBlob(ctx context.Context, nsOpID string, peer fftypes.JSONObject, sender fftypes.JSONObject, payloadRef string) error { ret := _m.Called(ctx, nsOpID, peer, sender, payloadRef) + if len(ret) == 0 { + panic("no return value specified for TransferBlob") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, fftypes.JSONObject, fftypes.JSONObject, string) error); ok { r0 = rf(ctx, nsOpID, peer, sender, payloadRef) @@ -222,6 +266,10 @@ func (_m *Plugin) TransferBlob(ctx context.Context, nsOpID string, peer fftypes. func (_m *Plugin) UploadBlob(ctx context.Context, ns string, id fftypes.UUID, content io.Reader) (string, *fftypes.Bytes32, int64, error) { ret := _m.Called(ctx, ns, id, content) + if len(ret) == 0 { + panic("no return value specified for UploadBlob") + } + var r0 string var r1 *fftypes.Bytes32 var r2 int64 @@ -258,13 +306,12 @@ func (_m *Plugin) UploadBlob(ctx context.Context, ns string, id fftypes.UUID, co return r0, r1, r2, r3 } -type mockConstructorTestingTNewPlugin interface { +// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPlugin(t interface { mock.TestingT Cleanup(func()) -} - -// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPlugin(t mockConstructorTestingTNewPlugin) *Plugin { +}) *Plugin { mock := &Plugin{} mock.Mock.Test(t) diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index 1f41fb57a2..aaf3139506 100644 --- a/mocks/datamocks/manager.go +++ b/mocks/datamocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
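The multi-value mocks regenerated below (for example Manager.UploadJSON) first type-assert the stored return value against the method's full function signature, so a test may pass a func to Return(...) and have the results computed from the call's arguments at invocation time. A hedged sketch under the same assumed import paths:

package sketch

import (
	"context"
	"testing"

	"github.com/hyperledger/firefly/mocks/datamocks"
	"github.com/hyperledger/firefly/pkg/core"
	"github.com/stretchr/testify/mock"
)

// TestUploadJSONDynamicReturn passes a function whose signature matches
// UploadJSON; the generated mock type-asserts it and returns its results.
func TestUploadJSONDynamicReturn(t *testing.T) {
	mdm := datamocks.NewManager(t)

	mdm.On("UploadJSON", mock.Anything, mock.Anything).Return(
		func(ctx context.Context, inData *core.DataRefOrValue) (*core.Data, error) {
			// The result could be derived from inData; here a fixed
			// non-nil Data is enough to exercise the happy path.
			return &core.Data{}, nil
		},
	)

	if d, err := mdm.UploadJSON(context.Background(), &core.DataRefOrValue{}); err != nil || d == nil {
		t.Fatal("unexpected result from mocked UploadJSON")
	}
}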
package datamocks @@ -26,6 +26,10 @@ type Manager struct { func (_m *Manager) BlobsEnabled() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for BlobsEnabled") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -40,6 +44,10 @@ func (_m *Manager) BlobsEnabled() bool { func (_m *Manager) CheckDatatype(ctx context.Context, datatype *core.Datatype) error { ret := _m.Called(ctx, datatype) + if len(ret) == 0 { + panic("no return value specified for CheckDatatype") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Datatype) error); ok { r0 = rf(ctx, datatype) @@ -54,6 +62,10 @@ func (_m *Manager) CheckDatatype(ctx context.Context, datatype *core.Datatype) e func (_m *Manager) DeleteData(ctx context.Context, dataID string) error { ret := _m.Called(ctx, dataID) + if len(ret) == 0 { + panic("no return value specified for DeleteData") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, dataID) @@ -68,6 +80,10 @@ func (_m *Manager) DeleteData(ctx context.Context, dataID string) error { func (_m *Manager) DownloadBlob(ctx context.Context, dataID string) (*core.Blob, io.ReadCloser, error) { ret := _m.Called(ctx, dataID) + if len(ret) == 0 { + panic("no return value specified for DownloadBlob") + } + var r0 *core.Blob var r1 io.ReadCloser var r2 error @@ -110,6 +126,10 @@ func (_m *Manager) GetMessageDataCached(ctx context.Context, msg *core.Message, _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetMessageDataCached") + } + var r0 core.DataArray var r1 bool var r2 error @@ -150,6 +170,10 @@ func (_m *Manager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes. _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetMessageWithDataCached") + } + var r0 *core.Message var r1 core.DataArray var r2 bool @@ -192,6 +216,10 @@ func (_m *Manager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes. func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *core.BatchPersisted) (*core.Batch, error) { ret := _m.Called(ctx, persistedBatch) + if len(ret) == 0 { + panic("no return value specified for HydrateBatch") + } + var r0 *core.Batch var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.BatchPersisted) (*core.Batch, error)); ok { @@ -225,6 +253,10 @@ func (_m *Manager) PeekMessageCache(ctx context.Context, id *fftypes.UUID, optio _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
+ if len(ret) == 0 { + panic("no return value specified for PeekMessageCache") + } + var r0 *core.Message var r1 core.DataArray if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) (*core.Message, core.DataArray)); ok { @@ -253,6 +285,10 @@ func (_m *Manager) PeekMessageCache(ctx context.Context, id *fftypes.UUID, optio func (_m *Manager) ResolveInlineData(ctx context.Context, msg *data.NewMessage) error { ret := _m.Called(ctx, msg) + if len(ret) == 0 { + panic("no return value specified for ResolveInlineData") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *data.NewMessage) error); ok { r0 = rf(ctx, msg) @@ -278,15 +314,19 @@ func (_m *Manager) UpdateMessageIfCached(ctx context.Context, msg *core.Message) _m.Called(ctx, msg) } -// UpdateMessageStateIfCached provides a mock function with given fields: ctx, id, state, confirmed -func (_m *Manager) UpdateMessageStateIfCached(ctx context.Context, id *fftypes.UUID, state fftypes.FFEnum, confirmed *fftypes.FFTime) { - _m.Called(ctx, id, state, confirmed) +// UpdateMessageStateIfCached provides a mock function with given fields: ctx, id, state, confirmed, rejectReason +func (_m *Manager) UpdateMessageStateIfCached(ctx context.Context, id *fftypes.UUID, state fftypes.FFEnum, confirmed *fftypes.FFTime, rejectReason string) { + _m.Called(ctx, id, state, confirmed, rejectReason) } // UploadBlob provides a mock function with given fields: ctx, inData, blob, autoMeta func (_m *Manager) UploadBlob(ctx context.Context, inData *core.DataRefOrValue, blob *ffapi.Multipart, autoMeta bool) (*core.Data, error) { ret := _m.Called(ctx, inData, blob, autoMeta) + if len(ret) == 0 { + panic("no return value specified for UploadBlob") + } + var r0 *core.Data var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.DataRefOrValue, *ffapi.Multipart, bool) (*core.Data, error)); ok { @@ -313,6 +353,10 @@ func (_m *Manager) UploadBlob(ctx context.Context, inData *core.DataRefOrValue, func (_m *Manager) UploadJSON(ctx context.Context, inData *core.DataRefOrValue) (*core.Data, error) { ret := _m.Called(ctx, inData) + if len(ret) == 0 { + panic("no return value specified for UploadJSON") + } + var r0 *core.Data var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.DataRefOrValue) (*core.Data, error)); ok { @@ -339,6 +383,10 @@ func (_m *Manager) UploadJSON(ctx context.Context, inData *core.DataRefOrValue) func (_m *Manager) ValidateAll(ctx context.Context, _a1 core.DataArray) (bool, error) { ret := _m.Called(ctx, _a1) + if len(ret) == 0 { + panic("no return value specified for ValidateAll") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, core.DataArray) (bool, error)); ok { @@ -368,6 +416,10 @@ func (_m *Manager) WaitStop() { func (_m *Manager) WriteNewMessage(ctx context.Context, newMsg *data.NewMessage) error { ret := _m.Called(ctx, newMsg) + if len(ret) == 0 { + panic("no return value specified for WriteNewMessage") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *data.NewMessage) error); ok { r0 = rf(ctx, newMsg) @@ -378,13 +430,12 @@ func (_m *Manager) WriteNewMessage(ctx context.Context, newMsg *data.NewMessage) return r0 } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/definitionsmocks/handler.go b/mocks/definitionsmocks/handler.go index 9182de7f5b..c67c6db42c 100644 --- a/mocks/definitionsmocks/handler.go +++ b/mocks/definitionsmocks/handler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package definitionsmocks @@ -22,6 +22,10 @@ type Handler struct { func (_m *Handler) HandleDefinitionBroadcast(ctx context.Context, state *core.BatchState, msg *core.Message, data core.DataArray, tx *fftypes.UUID) (definitions.HandlerResult, error) { ret := _m.Called(ctx, state, msg, data, tx) + if len(ret) == 0 { + panic("no return value specified for HandleDefinitionBroadcast") + } + var r0 definitions.HandlerResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.BatchState, *core.Message, core.DataArray, *fftypes.UUID) (definitions.HandlerResult, error)); ok { @@ -42,13 +46,12 @@ func (_m *Handler) HandleDefinitionBroadcast(ctx context.Context, state *core.Ba return r0, r1 } -type mockConstructorTestingTNewHandler interface { +// NewHandler creates a new instance of Handler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHandler(t interface { mock.TestingT Cleanup(func()) -} - -// NewHandler creates a new instance of Handler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHandler(t mockConstructorTestingTNewHandler) *Handler { +}) *Handler { mock := &Handler{} mock.Mock.Test(t) diff --git a/mocks/definitionsmocks/sender.go b/mocks/definitionsmocks/sender.go index d291e4b7ce..28fd4736c2 100644 --- a/mocks/definitionsmocks/sender.go +++ b/mocks/definitionsmocks/sender.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
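For the variadic methods regenerated earlier (for example Plugin.InsertMessages, Plugin.InsertOperations and Manager.GetMessageDataCached), the mock expands each trailing hook or option into its own positional argument before _m.Called(...) runs, so an expectation must supply one matcher per argument actually passed, including every variadic element. Sketch under the same path assumptions:

package sketch

import (
	"context"
	"testing"

	"github.com/hyperledger/firefly/mocks/databasemocks"
	"github.com/hyperledger/firefly/pkg/core"
	"github.com/stretchr/testify/mock"
)

// TestInsertMessagesVariadicMatch calls InsertMessages with no hooks, so the
// mock sees exactly two arguments (ctx and the message slice). A call that
// also passed a database.PostCompletionHook would need a third matcher,
// e.g. another mock.Anything, for the expectation to match.
func TestInsertMessagesVariadicMatch(t *testing.T) {
	mdb := databasemocks.NewPlugin(t)

	mdb.On("InsertMessages", mock.Anything, mock.Anything).Return(nil)

	if err := mdb.InsertMessages(context.Background(), []*core.Message{{}}); err != nil {
		t.Fatal(err)
	}
}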
package definitionsmocks @@ -17,13 +17,17 @@ type Sender struct { mock.Mock } -// ClaimIdentity provides a mock function with given fields: ctx, def, signingIdentity, parentSigner, waitConfirm -func (_m *Sender) ClaimIdentity(ctx context.Context, def *core.IdentityClaim, signingIdentity *core.SignerRef, parentSigner *core.SignerRef, waitConfirm bool) error { - ret := _m.Called(ctx, def, signingIdentity, parentSigner, waitConfirm) +// ClaimIdentity provides a mock function with given fields: ctx, def, signingIdentity, parentSigner +func (_m *Sender) ClaimIdentity(ctx context.Context, def *core.IdentityClaim, signingIdentity *core.SignerRef, parentSigner *core.SignerRef) error { + ret := _m.Called(ctx, def, signingIdentity, parentSigner) + + if len(ret) == 0 { + panic("no return value specified for ClaimIdentity") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *core.IdentityClaim, *core.SignerRef, *core.SignerRef, bool) error); ok { - r0 = rf(ctx, def, signingIdentity, parentSigner, waitConfirm) + if rf, ok := ret.Get(0).(func(context.Context, *core.IdentityClaim, *core.SignerRef, *core.SignerRef) error); ok { + r0 = rf(ctx, def, signingIdentity, parentSigner) } else { r0 = ret.Error(0) } @@ -35,6 +39,10 @@ func (_m *Sender) ClaimIdentity(ctx context.Context, def *core.IdentityClaim, si func (_m *Sender) DefineContractAPI(ctx context.Context, httpServerURL string, api *core.ContractAPI, waitConfirm bool) error { ret := _m.Called(ctx, httpServerURL, api, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for DefineContractAPI") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *core.ContractAPI, bool) error); ok { r0 = rf(ctx, httpServerURL, api, waitConfirm) @@ -49,6 +57,10 @@ func (_m *Sender) DefineContractAPI(ctx context.Context, httpServerURL string, a func (_m *Sender) DefineDatatype(ctx context.Context, datatype *core.Datatype, waitConfirm bool) error { ret := _m.Called(ctx, datatype, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for DefineDatatype") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Datatype, bool) error); ok { r0 = rf(ctx, datatype, waitConfirm) @@ -63,6 +75,10 @@ func (_m *Sender) DefineDatatype(ctx context.Context, datatype *core.Datatype, w func (_m *Sender) DefineFFI(ctx context.Context, ffi *fftypes.FFI, waitConfirm bool) error { ret := _m.Called(ctx, ffi, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for DefineFFI") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.FFI, bool) error); ok { r0 = rf(ctx, ffi, waitConfirm) @@ -74,11 +90,15 @@ func (_m *Sender) DefineFFI(ctx context.Context, ffi *fftypes.FFI, waitConfirm b } // DefineTokenPool provides a mock function with given fields: ctx, pool, waitConfirm -func (_m *Sender) DefineTokenPool(ctx context.Context, pool *core.TokenPoolAnnouncement, waitConfirm bool) error { +func (_m *Sender) DefineTokenPool(ctx context.Context, pool *core.TokenPool, waitConfirm bool) error { ret := _m.Called(ctx, pool, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for DefineTokenPool") + } + var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPoolAnnouncement, bool) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool, bool) error); ok { r0 = rf(ctx, pool, waitConfirm) } else { r0 = ret.Error(0) @@ -91,6 +111,10 @@ func (_m *Sender) DefineTokenPool(ctx context.Context, pool *core.TokenPoolAnnou func (_m *Sender) 
Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -101,10 +125,104 @@ func (_m *Sender) Name() string { return r0 } +// PublishContractAPI provides a mock function with given fields: ctx, httpServerURL, name, networkName, waitConfirm +func (_m *Sender) PublishContractAPI(ctx context.Context, httpServerURL string, name string, networkName string, waitConfirm bool) (*core.ContractAPI, error) { + ret := _m.Called(ctx, httpServerURL, name, networkName, waitConfirm) + + if len(ret) == 0 { + panic("no return value specified for PublishContractAPI") + } + + var r0 *core.ContractAPI + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) (*core.ContractAPI, error)); ok { + return rf(ctx, httpServerURL, name, networkName, waitConfirm) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) *core.ContractAPI); ok { + r0 = rf(ctx, httpServerURL, name, networkName, waitConfirm) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.ContractAPI) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, bool) error); ok { + r1 = rf(ctx, httpServerURL, name, networkName, waitConfirm) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PublishFFI provides a mock function with given fields: ctx, name, version, networkName, waitConfirm +func (_m *Sender) PublishFFI(ctx context.Context, name string, version string, networkName string, waitConfirm bool) (*fftypes.FFI, error) { + ret := _m.Called(ctx, name, version, networkName, waitConfirm) + + if len(ret) == 0 { + panic("no return value specified for PublishFFI") + } + + var r0 *fftypes.FFI + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) (*fftypes.FFI, error)); ok { + return rf(ctx, name, version, networkName, waitConfirm) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) *fftypes.FFI); ok { + r0 = rf(ctx, name, version, networkName, waitConfirm) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.FFI) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, bool) error); ok { + r1 = rf(ctx, name, version, networkName, waitConfirm) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PublishTokenPool provides a mock function with given fields: ctx, poolNameOrID, networkName, waitConfirm +func (_m *Sender) PublishTokenPool(ctx context.Context, poolNameOrID string, networkName string, waitConfirm bool) (*core.TokenPool, error) { + ret := _m.Called(ctx, poolNameOrID, networkName, waitConfirm) + + if len(ret) == 0 { + panic("no return value specified for PublishTokenPool") + } + + var r0 *core.TokenPool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) (*core.TokenPool, error)); ok { + return rf(ctx, poolNameOrID, networkName, waitConfirm) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) *core.TokenPool); ok { + r0 = rf(ctx, poolNameOrID, networkName, waitConfirm) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.TokenPool) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, bool) error); ok { + r1 = rf(ctx, poolNameOrID, networkName, waitConfirm) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // UpdateIdentity provides a mock function with given fields: ctx, identity, def, signingIdentity, 
waitConfirm func (_m *Sender) UpdateIdentity(ctx context.Context, identity *core.Identity, def *core.IdentityUpdate, signingIdentity *core.SignerRef, waitConfirm bool) error { ret := _m.Called(ctx, identity, def, signingIdentity, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for UpdateIdentity") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Identity, *core.IdentityUpdate, *core.SignerRef, bool) error); ok { r0 = rf(ctx, identity, def, signingIdentity, waitConfirm) @@ -115,13 +233,12 @@ func (_m *Sender) UpdateIdentity(ctx context.Context, identity *core.Identity, d return r0 } -type mockConstructorTestingTNewSender interface { +// NewSender creates a new instance of Sender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSender(t interface { mock.TestingT Cleanup(func()) -} - -// NewSender creates a new instance of Sender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSender(t mockConstructorTestingTNewSender) *Sender { +}) *Sender { mock := &Sender{} mock.Mock.Test(t) diff --git a/mocks/eventmocks/event_manager.go b/mocks/eventmocks/event_manager.go index 7f75517237..0251218d91 100644 --- a/mocks/eventmocks/event_manager.go +++ b/mocks/eventmocks/event_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package eventmocks @@ -15,6 +15,8 @@ import ( mock "github.com/stretchr/testify/mock" + pkgevents "github.com/hyperledger/firefly/pkg/events" + sharedstorage "github.com/hyperledger/firefly/pkg/sharedstorage" system "github.com/hyperledger/firefly/internal/events/system" @@ -31,23 +33,13 @@ type EventManager struct { func (_m *EventManager) AddSystemEventListener(ns string, el system.EventListener) error { ret := _m.Called(ns, el) - var r0 error - if rf, ok := ret.Get(0).(func(string, system.EventListener) error); ok { - r0 = rf(ns, el) - } else { - r0 = ret.Error(0) + if len(ret) == 0 { + panic("no return value specified for AddSystemEventListener") } - return r0 -} - -// BatchPinComplete provides a mock function with given fields: namespace, batch, signingKey -func (_m *EventManager) BatchPinComplete(namespace string, batch *blockchain.BatchPin, signingKey *core.VerifierRef) error { - ret := _m.Called(namespace, batch, signingKey) - var r0 error - if rf, ok := ret.Get(0).(func(string, *blockchain.BatchPin, *core.VerifierRef) error); ok { - r0 = rf(namespace, batch, signingKey) + if rf, ok := ret.Get(0).(func(string, system.EventListener) error); ok { + r0 = rf(ns, el) } else { r0 = ret.Error(0) } @@ -55,27 +47,17 @@ func (_m *EventManager) BatchPinComplete(namespace string, batch *blockchain.Bat return r0 } -// BlockchainEvent provides a mock function with given fields: event -func (_m *EventManager) BlockchainEvent(event *blockchain.EventWithSubscription) error { - ret := _m.Called(event) +// BlockchainEventBatch provides a mock function with given fields: batch +func (_m *EventManager) BlockchainEventBatch(batch []*blockchain.EventToDispatch) error { + ret := _m.Called(batch) - var r0 error - if rf, ok := ret.Get(0).(func(*blockchain.EventWithSubscription) error); ok { - r0 = rf(event) - } else { - r0 = ret.Error(0) + if len(ret) == 0 { + panic("no return value specified for BlockchainEventBatch") } - return r0 -} - -// BlockchainNetworkAction provides a mock function 
with given fields: action, location, event, signingKey -func (_m *EventManager) BlockchainNetworkAction(action string, location *fftypes.JSONAny, event *blockchain.Event, signingKey *core.VerifierRef) error { - ret := _m.Called(action, location, event, signingKey) - var r0 error - if rf, ok := ret.Get(0).(func(string, *fftypes.JSONAny, *blockchain.Event, *core.VerifierRef) error); ok { - r0 = rf(action, location, event, signingKey) + if rf, ok := ret.Get(0).(func([]*blockchain.EventToDispatch) error); ok { + r0 = rf(batch) } else { r0 = ret.Error(0) } @@ -87,6 +69,10 @@ func (_m *EventManager) BlockchainNetworkAction(action string, location *fftypes func (_m *EventManager) CreateUpdateDurableSubscription(ctx context.Context, subDef *core.Subscription, mustNew bool) error { ret := _m.Called(ctx, subDef, mustNew) + if len(ret) == 0 { + panic("no return value specified for CreateUpdateDurableSubscription") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Subscription, bool) error); ok { r0 = rf(ctx, subDef, mustNew) @@ -101,6 +87,10 @@ func (_m *EventManager) CreateUpdateDurableSubscription(ctx context.Context, sub func (_m *EventManager) DXEvent(plugin dataexchange.Plugin, event dataexchange.DXEvent) error { ret := _m.Called(plugin, event) + if len(ret) == 0 { + panic("no return value specified for DXEvent") + } + var r0 error if rf, ok := ret.Get(0).(func(dataexchange.Plugin, dataexchange.DXEvent) error); ok { r0 = rf(plugin, event) @@ -115,6 +105,10 @@ func (_m *EventManager) DXEvent(plugin dataexchange.Plugin, event dataexchange.D func (_m *EventManager) DeleteDurableSubscription(ctx context.Context, subDef *core.Subscription) error { ret := _m.Called(ctx, subDef) + if len(ret) == 0 { + panic("no return value specified for DeleteDurableSubscription") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Subscription) error); ok { r0 = rf(ctx, subDef) @@ -129,6 +123,10 @@ func (_m *EventManager) DeleteDurableSubscription(ctx context.Context, subDef *c func (_m *EventManager) DeletedSubscriptions() chan<- *fftypes.UUID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for DeletedSubscriptions") + } + var r0 chan<- *fftypes.UUID if rf, ok := ret.Get(0).(func() chan<- *fftypes.UUID); ok { r0 = rf() @@ -145,6 +143,10 @@ func (_m *EventManager) DeletedSubscriptions() chan<- *fftypes.UUID { func (_m *EventManager) EnrichEvent(ctx context.Context, event *core.Event) (*core.EnrichedEvent, error) { ret := _m.Called(ctx, event) + if len(ret) == 0 { + panic("no return value specified for EnrichEvent") + } + var r0 *core.EnrichedEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Event) (*core.EnrichedEvent, error)); ok { @@ -171,6 +173,10 @@ func (_m *EventManager) EnrichEvent(ctx context.Context, event *core.Event) (*co func (_m *EventManager) GetPlugins() []*core.NamespaceStatusPlugin { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetPlugins") + } + var r0 []*core.NamespaceStatusPlugin if rf, ok := ret.Get(0).(func() []*core.NamespaceStatusPlugin); ok { r0 = rf() @@ -187,6 +193,10 @@ func (_m *EventManager) GetPlugins() []*core.NamespaceStatusPlugin { func (_m *EventManager) NewEvents() chan<- int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewEvents") + } + var r0 chan<- int64 if rf, ok := ret.Get(0).(func() chan<- int64); ok { r0 = rf() @@ -203,6 +213,10 @@ func (_m *EventManager) NewEvents() chan<- int64 { func (_m *EventManager) 
NewPins() chan<- int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewPins") + } + var r0 chan<- int64 if rf, ok := ret.Get(0).(func() chan<- int64); ok { r0 = rf() @@ -219,6 +233,10 @@ func (_m *EventManager) NewPins() chan<- int64 { func (_m *EventManager) NewSubscriptions() chan<- *fftypes.UUID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewSubscriptions") + } + var r0 chan<- *fftypes.UUID if rf, ok := ret.Get(0).(func() chan<- *fftypes.UUID); ok { r0 = rf() @@ -236,10 +254,51 @@ func (_m *EventManager) QueueBatchRewind(batchID *fftypes.UUID) { _m.Called(batchID) } +// ResolveTransportAndCapabilities provides a mock function with given fields: ctx, transportName +func (_m *EventManager) ResolveTransportAndCapabilities(ctx context.Context, transportName string) (string, *pkgevents.Capabilities, error) { + ret := _m.Called(ctx, transportName) + + if len(ret) == 0 { + panic("no return value specified for ResolveTransportAndCapabilities") + } + + var r0 string + var r1 *pkgevents.Capabilities + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, string) (string, *pkgevents.Capabilities, error)); ok { + return rf(ctx, transportName) + } + if rf, ok := ret.Get(0).(func(context.Context, string) string); ok { + r0 = rf(ctx, transportName) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) *pkgevents.Capabilities); ok { + r1 = rf(ctx, transportName) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*pkgevents.Capabilities) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { + r2 = rf(ctx, transportName) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // SharedStorageBatchDownloaded provides a mock function with given fields: ss, payloadRef, data func (_m *EventManager) SharedStorageBatchDownloaded(ss sharedstorage.Plugin, payloadRef string, data []byte) (*fftypes.UUID, error) { ret := _m.Called(ss, payloadRef, data) + if len(ret) == 0 { + panic("no return value specified for SharedStorageBatchDownloaded") + } + var r0 *fftypes.UUID var r1 error if rf, ok := ret.Get(0).(func(sharedstorage.Plugin, string, []byte) (*fftypes.UUID, error)); ok { @@ -266,6 +325,10 @@ func (_m *EventManager) SharedStorageBatchDownloaded(ss sharedstorage.Plugin, pa func (_m *EventManager) SharedStorageBlobDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string, dataID *fftypes.UUID) error { ret := _m.Called(ss, hash, size, payloadRef, dataID) + if len(ret) == 0 { + panic("no return value specified for SharedStorageBlobDownloaded") + } + var r0 error if rf, ok := ret.Get(0).(func(sharedstorage.Plugin, fftypes.Bytes32, int64, string, *fftypes.UUID) error); ok { r0 = rf(ss, hash, size, payloadRef, dataID) @@ -280,6 +343,10 @@ func (_m *EventManager) SharedStorageBlobDownloaded(ss sharedstorage.Plugin, has func (_m *EventManager) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -294,6 +361,10 @@ func (_m *EventManager) Start() error { func (_m *EventManager) SubscriptionUpdates() chan<- *fftypes.UUID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SubscriptionUpdates") + } + var r0 chan<- *fftypes.UUID if rf, ok := ret.Get(0).(func() chan<- *fftypes.UUID); ok { r0 = rf() @@ -310,6 +381,10 @@ func (_m *EventManager) SubscriptionUpdates() chan<- 
*fftypes.UUID { func (_m *EventManager) TokenPoolCreated(ctx context.Context, ti tokens.Plugin, pool *tokens.TokenPool) error { ret := _m.Called(ctx, ti, pool) + if len(ret) == 0 { + panic("no return value specified for TokenPoolCreated") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, tokens.Plugin, *tokens.TokenPool) error); ok { r0 = rf(ctx, ti, pool) @@ -324,6 +399,10 @@ func (_m *EventManager) TokenPoolCreated(ctx context.Context, ti tokens.Plugin, func (_m *EventManager) TokensApproved(ti tokens.Plugin, approval *tokens.TokenApproval) error { ret := _m.Called(ti, approval) + if len(ret) == 0 { + panic("no return value specified for TokensApproved") + } + var r0 error if rf, ok := ret.Get(0).(func(tokens.Plugin, *tokens.TokenApproval) error); ok { r0 = rf(ti, approval) @@ -338,6 +417,10 @@ func (_m *EventManager) TokensApproved(ti tokens.Plugin, approval *tokens.TokenA func (_m *EventManager) TokensTransferred(ti tokens.Plugin, transfer *tokens.TokenTransfer) error { ret := _m.Called(ti, transfer) + if len(ret) == 0 { + panic("no return value specified for TokensTransferred") + } + var r0 error if rf, ok := ret.Get(0).(func(tokens.Plugin, *tokens.TokenTransfer) error); ok { r0 = rf(ti, transfer) @@ -353,13 +436,12 @@ func (_m *EventManager) WaitStop() { _m.Called() } -type mockConstructorTestingTNewEventManager interface { +// NewEventManager creates a new instance of EventManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewEventManager creates a new instance of EventManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEventManager(t mockConstructorTestingTNewEventManager) *EventManager { +}) *EventManager { mock := &EventManager{} mock.Mock.Test(t) diff --git a/mocks/eventsmocks/callbacks.go b/mocks/eventsmocks/callbacks.go index 2ef0a5f035..a018cbb77f 100644 --- a/mocks/eventsmocks/callbacks.go +++ b/mocks/eventsmocks/callbacks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package eventsmocks @@ -28,6 +28,10 @@ func (_m *Callbacks) DeliveryResponse(connID string, inflight *core.EventDeliver func (_m *Callbacks) EphemeralSubscription(connID string, namespace string, filter *core.SubscriptionFilter, options *core.SubscriptionOptions) error { ret := _m.Called(connID, namespace, filter, options) + if len(ret) == 0 { + panic("no return value specified for EphemeralSubscription") + } + var r0 error if rf, ok := ret.Get(0).(func(string, string, *core.SubscriptionFilter, *core.SubscriptionOptions) error); ok { r0 = rf(connID, namespace, filter, options) @@ -42,6 +46,10 @@ func (_m *Callbacks) EphemeralSubscription(connID string, namespace string, filt func (_m *Callbacks) RegisterConnection(connID string, matcher events.SubscriptionMatcher) error { ret := _m.Called(connID, matcher) + if len(ret) == 0 { + panic("no return value specified for RegisterConnection") + } + var r0 error if rf, ok := ret.Get(0).(func(string, events.SubscriptionMatcher) error); ok { r0 = rf(connID, matcher) @@ -52,13 +60,12 @@ func (_m *Callbacks) RegisterConnection(connID string, matcher events.Subscripti return r0 } -type mockConstructorTestingTNewCallbacks interface { +// NewCallbacks creates a new instance of Callbacks. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCallbacks(t interface { mock.TestingT Cleanup(func()) -} - -// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCallbacks(t mockConstructorTestingTNewCallbacks) *Callbacks { +}) *Callbacks { mock := &Callbacks{} mock.Mock.Test(t) diff --git a/mocks/eventsmocks/plugin.go b/mocks/eventsmocks/plugin.go index 58d1fa2fe1..72b9b90573 100644 --- a/mocks/eventsmocks/plugin.go +++ b/mocks/eventsmocks/plugin.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package eventsmocks @@ -21,10 +21,32 @@ type Plugin struct { mock.Mock } +// BatchDeliveryRequest provides a mock function with given fields: ctx, connID, sub, _a3 +func (_m *Plugin) BatchDeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, _a3 []*core.CombinedEventDataDelivery) error { + ret := _m.Called(ctx, connID, sub, _a3) + + if len(ret) == 0 { + panic("no return value specified for BatchDeliveryRequest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *core.Subscription, []*core.CombinedEventDataDelivery) error); ok { + r0 = rf(ctx, connID, sub, _a3) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // Capabilities provides a mock function with given fields: func (_m *Plugin) Capabilities() *events.Capabilities { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Capabilities") + } + var r0 *events.Capabilities if rf, ok := ret.Get(0).(func() *events.Capabilities); ok { r0 = rf() @@ -37,13 +59,17 @@ func (_m *Plugin) Capabilities() *events.Capabilities { return r0 } -// DeliveryRequest provides a mock function with given fields: connID, sub, event, data -func (_m *Plugin) DeliveryRequest(connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error { - ret := _m.Called(connID, sub, event, data) +// DeliveryRequest provides a mock function with given fields: ctx, connID, sub, event, data +func (_m *Plugin) DeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error { + ret := _m.Called(ctx, connID, sub, event, data) + + if len(ret) == 0 { + panic("no return value specified for DeliveryRequest") + } var r0 error - if rf, ok := ret.Get(0).(func(string, *core.Subscription, *core.EventDelivery, core.DataArray) error); ok { - r0 = rf(connID, sub, event, data) + if rf, ok := ret.Get(0).(func(context.Context, string, *core.Subscription, *core.EventDelivery, core.DataArray) error); ok { + r0 = rf(ctx, connID, sub, event, data) } else { r0 = ret.Error(0) } @@ -55,6 +81,10 @@ func (_m *Plugin) DeliveryRequest(connID string, sub *core.Subscription, event * func (_m *Plugin) Init(ctx context.Context, _a1 config.Section) error { ret := _m.Called(ctx, _a1) + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, config.Section) error); ok { r0 = rf(ctx, _a1) @@ -74,6 +104,10 @@ func (_m *Plugin) InitConfig(_a0 config.Section) { func (_m *Plugin) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -93,6 +127,10 @@ 
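Two changes from the mockery v2.38.0 regeneration recur across every mock in this diff, as the eventsmocks files above show: the named `mockConstructorTestingT…` helper interfaces are replaced by an inline `interface { mock.TestingT; Cleanup(func()) }` constructor parameter, and every method with return values now panics with an explicit "no return value specified for …" message when an expectation was recorded without a `Return(...)`. The sketch below is not part of the diff; it assumes the usual `github.com/hyperledger/firefly/mocks/eventsmocks` import path and only exercises behaviour visible in the generated code.

```go
package example_test

import (
	"testing"

	"github.com/hyperledger/firefly/mocks/eventsmocks"
)

func TestRegeneratedMockPattern(t *testing.T) {
	// The constructor still registers the mock with t and, per the generated
	// doc comment, adds a cleanup function that asserts all expectations
	// were met; only the parameter type changed from the named helper
	// interface to the inline one.
	mp := eventsmocks.NewPlugin(t)

	// Expectations must carry a Return(...): the regenerated method checks
	// len(ret) == 0 and panics with "no return value specified for Name"
	// if the return arguments are missing.
	mp.On("Name").Return("websockets")

	if got := mp.Name(); got != "websockets" {
		t.Fatalf("unexpected plugin name %q", got)
	}

	// An expectation declared without Return(...) would now panic at call
	// time with the explicit message above, e.g.:
	//   mp.On("Capabilities")
	//   _ = mp.Capabilities() // panics: no return value specified for Capabilities
}
```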
func (_m *Plugin) NamespaceRestarted(ns string, startTime time.Time) { func (_m *Plugin) SetHandler(namespace string, handler events.Callbacks) error { ret := _m.Called(namespace, handler) + if len(ret) == 0 { + panic("no return value specified for SetHandler") + } + var r0 error if rf, ok := ret.Get(0).(func(string, events.Callbacks) error); ok { r0 = rf(namespace, handler) @@ -103,13 +141,17 @@ func (_m *Plugin) SetHandler(namespace string, handler events.Callbacks) error { return r0 } -// ValidateOptions provides a mock function with given fields: options -func (_m *Plugin) ValidateOptions(options *core.SubscriptionOptions) error { - ret := _m.Called(options) +// ValidateOptions provides a mock function with given fields: ctx, options +func (_m *Plugin) ValidateOptions(ctx context.Context, options *core.SubscriptionOptions) error { + ret := _m.Called(ctx, options) + + if len(ret) == 0 { + panic("no return value specified for ValidateOptions") + } var r0 error - if rf, ok := ret.Get(0).(func(*core.SubscriptionOptions) error); ok { - r0 = rf(options) + if rf, ok := ret.Get(0).(func(context.Context, *core.SubscriptionOptions) error); ok { + r0 = rf(ctx, options) } else { r0 = ret.Error(0) } @@ -117,13 +159,12 @@ func (_m *Plugin) ValidateOptions(options *core.SubscriptionOptions) error { return r0 } -type mockConstructorTestingTNewPlugin interface { +// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPlugin(t interface { mock.TestingT Cleanup(func()) -} - -// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPlugin(t mockConstructorTestingTNewPlugin) *Plugin { +}) *Plugin { mock := &Plugin{} mock.Mock.Test(t) diff --git a/mocks/identitymanagermocks/manager.go b/mocks/identitymanagermocks/manager.go index 5f82b238b7..6a0ed097d8 100644 --- a/mocks/identitymanagermocks/manager.go +++ b/mocks/identitymanagermocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
package identitymanagermocks @@ -23,6 +23,10 @@ type Manager struct { func (_m *Manager) CachedIdentityLookupByID(ctx context.Context, id *fftypes.UUID) (*core.Identity, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for CachedIdentityLookupByID") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (*core.Identity, error)); ok { @@ -49,6 +53,10 @@ func (_m *Manager) CachedIdentityLookupByID(ctx context.Context, id *fftypes.UUI func (_m *Manager) CachedIdentityLookupMustExist(ctx context.Context, did string) (*core.Identity, bool, error) { ret := _m.Called(ctx, did) + if len(ret) == 0 { + panic("no return value specified for CachedIdentityLookupMustExist") + } + var r0 *core.Identity var r1 bool var r2 error @@ -82,6 +90,10 @@ func (_m *Manager) CachedIdentityLookupMustExist(ctx context.Context, did string func (_m *Manager) CachedIdentityLookupNilOK(ctx context.Context, did string) (*core.Identity, bool, error) { ret := _m.Called(ctx, did) + if len(ret) == 0 { + panic("no return value specified for CachedIdentityLookupNilOK") + } + var r0 *core.Identity var r1 bool var r2 error @@ -115,6 +127,10 @@ func (_m *Manager) CachedIdentityLookupNilOK(ctx context.Context, did string) (* func (_m *Manager) FindIdentityForVerifier(ctx context.Context, iTypes []fftypes.FFEnum, verifier *core.VerifierRef) (*core.Identity, error) { ret := _m.Called(ctx, iTypes, verifier) + if len(ret) == 0 { + panic("no return value specified for FindIdentityForVerifier") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, []fftypes.FFEnum, *core.VerifierRef) (*core.Identity, error)); ok { @@ -141,6 +157,10 @@ func (_m *Manager) FindIdentityForVerifier(ctx context.Context, iTypes []fftypes func (_m *Manager) GetLocalNode(ctx context.Context) (*core.Identity, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetLocalNode") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*core.Identity, error)); ok { @@ -163,10 +183,14 @@ func (_m *Manager) GetLocalNode(ctx context.Context) (*core.Identity, error) { return r0, r1 } -// GetMultipartyRootOrg provides a mock function with given fields: ctx -func (_m *Manager) GetMultipartyRootOrg(ctx context.Context) (*core.Identity, error) { +// GetRootOrg provides a mock function with given fields: ctx +func (_m *Manager) GetRootOrg(ctx context.Context) (*core.Identity, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetRootOrg") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*core.Identity, error)); ok { @@ -189,21 +213,23 @@ func (_m *Manager) GetMultipartyRootOrg(ctx context.Context) (*core.Identity, er return r0, r1 } -// GetMultipartyRootVerifier provides a mock function with given fields: ctx -func (_m *Manager) GetMultipartyRootVerifier(ctx context.Context) (*core.VerifierRef, error) { +// GetRootOrgDID provides a mock function with given fields: ctx +func (_m *Manager) GetRootOrgDID(ctx context.Context) (string, error) { ret := _m.Called(ctx) - var r0 *core.VerifierRef + if len(ret) == 0 { + panic("no return value specified for GetRootOrgDID") + } + + var r0 string var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*core.VerifierRef, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { return rf(ctx) } - if rf, ok := 
ret.Get(0).(func(context.Context) *core.VerifierRef); ok { + if rf, ok := ret.Get(0).(func(context.Context) string); ok { r0 = rf(ctx) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*core.VerifierRef) - } + r0 = ret.Get(0).(string) } if rf, ok := ret.Get(1).(func(context.Context) error); ok { @@ -219,6 +245,10 @@ func (_m *Manager) GetMultipartyRootVerifier(ctx context.Context) (*core.Verifie func (_m *Manager) ResolveIdentitySigner(ctx context.Context, _a1 *core.Identity) (*core.SignerRef, error) { ret := _m.Called(ctx, _a1) + if len(ret) == 0 { + panic("no return value specified for ResolveIdentitySigner") + } + var r0 *core.SignerRef var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Identity) (*core.SignerRef, error)); ok { @@ -245,6 +275,10 @@ func (_m *Manager) ResolveIdentitySigner(ctx context.Context, _a1 *core.Identity func (_m *Manager) ResolveInputSigningIdentity(ctx context.Context, signerRef *core.SignerRef) error { ret := _m.Called(ctx, signerRef) + if len(ret) == 0 { + panic("no return value specified for ResolveInputSigningIdentity") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.SignerRef) error); ok { r0 = rf(ctx, signerRef) @@ -259,6 +293,10 @@ func (_m *Manager) ResolveInputSigningIdentity(ctx context.Context, signerRef *c func (_m *Manager) ResolveInputSigningKey(ctx context.Context, inputKey string, keyNormalizationMode int) (string, error) { ret := _m.Called(ctx, inputKey, keyNormalizationMode) + if len(ret) == 0 { + panic("no return value specified for ResolveInputSigningKey") + } + var r0 string var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, int) (string, error)); ok { @@ -283,6 +321,10 @@ func (_m *Manager) ResolveInputSigningKey(ctx context.Context, inputKey string, func (_m *Manager) ResolveInputVerifierRef(ctx context.Context, inputKey *core.VerifierRef, intent blockchain.ResolveKeyIntent) (*core.VerifierRef, error) { ret := _m.Called(ctx, inputKey, intent) + if len(ret) == 0 { + panic("no return value specified for ResolveInputVerifierRef") + } + var r0 *core.VerifierRef var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.VerifierRef, blockchain.ResolveKeyIntent) (*core.VerifierRef, error)); ok { @@ -305,10 +347,44 @@ func (_m *Manager) ResolveInputVerifierRef(ctx context.Context, inputKey *core.V return r0, r1 } +// ResolveMultipartyRootVerifier provides a mock function with given fields: ctx +func (_m *Manager) ResolveMultipartyRootVerifier(ctx context.Context) (*core.VerifierRef, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ResolveMultipartyRootVerifier") + } + + var r0 *core.VerifierRef + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*core.VerifierRef, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *core.VerifierRef); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.VerifierRef) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ResolveQuerySigningKey provides a mock function with given fields: ctx, inputKey, keyNormalizationMode func (_m *Manager) ResolveQuerySigningKey(ctx context.Context, inputKey string, keyNormalizationMode int) (string, error) { ret := _m.Called(ctx, inputKey, keyNormalizationMode) + if len(ret) == 0 { + panic("no return value specified for ResolveQuerySigningKey") + } + var r0 string var r1 error if rf, ok := 
ret.Get(0).(func(context.Context, string, int) (string, error)); ok { @@ -333,6 +409,10 @@ func (_m *Manager) ResolveQuerySigningKey(ctx context.Context, inputKey string, func (_m *Manager) ValidateNodeOwner(ctx context.Context, node *core.Identity, _a2 *core.Identity) (bool, error) { ret := _m.Called(ctx, node, _a2) + if len(ret) == 0 { + panic("no return value specified for ValidateNodeOwner") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Identity, *core.Identity) (bool, error)); ok { @@ -357,6 +437,10 @@ func (_m *Manager) ValidateNodeOwner(ctx context.Context, node *core.Identity, _ func (_m *Manager) VerifyIdentityChain(ctx context.Context, _a1 *core.Identity) (*core.Identity, bool, error) { ret := _m.Called(ctx, _a1) + if len(ret) == 0 { + panic("no return value specified for VerifyIdentityChain") + } + var r0 *core.Identity var r1 bool var r2 error @@ -386,13 +470,12 @@ func (_m *Manager) VerifyIdentityChain(ctx context.Context, _a1 *core.Identity) return r0, r1, r2 } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/identitymocks/callbacks.go b/mocks/identitymocks/callbacks.go index eae23b5add..d3445fa433 100644 --- a/mocks/identitymocks/callbacks.go +++ b/mocks/identitymocks/callbacks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package identitymocks @@ -9,13 +9,12 @@ type Callbacks struct { mock.Mock } -type mockConstructorTestingTNewCallbacks interface { +// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCallbacks(t interface { mock.TestingT Cleanup(func()) -} - -// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCallbacks(t mockConstructorTestingTNewCallbacks) *Callbacks { +}) *Callbacks { mock := &Callbacks{} mock.Mock.Test(t) diff --git a/mocks/identitymocks/plugin.go b/mocks/identitymocks/plugin.go index 06e671d76d..0d2c8e6b6a 100644 --- a/mocks/identitymocks/plugin.go +++ b/mocks/identitymocks/plugin.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
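The identity manager mock above also tracks an interface rename and split: `GetMultipartyRootOrg` becomes `GetRootOrg`, `GetMultipartyRootVerifier` is replaced by `GetRootOrgDID` (returning a plain DID string), and a separate `ResolveMultipartyRootVerifier` is added. A hedged sketch of updated test stubs follows; the import paths and sample DID value are assumptions, but both `Return` styles shown (literal values, or a single function matching the full signature, which the generated type switch unpacks) follow directly from the generated code in this hunk.

```go
package example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/hyperledger/firefly/mocks/identitymanagermocks"
	"github.com/hyperledger/firefly/pkg/core"
)

func TestRootOrgStubs(t *testing.T) {
	mim := identitymanagermocks.NewManager(t)
	ctx := context.Background()

	// Literal return values for the renamed DID lookup.
	mim.On("GetRootOrgDID", mock.Anything).
		Return("did:firefly:org/acme", nil) // sample DID, purely illustrative

	// A single function return value: the generated code type-asserts it
	// against func(context.Context) (*core.Identity, error) and calls it.
	mim.On("GetRootOrg", mock.Anything).
		Return(func(ctx context.Context) (*core.Identity, error) {
			return &core.Identity{}, nil
		})

	if did, err := mim.GetRootOrgDID(ctx); err != nil || did == "" {
		t.Fatalf("unexpected DID result: %q %v", did, err)
	}
	if org, err := mim.GetRootOrg(ctx); err != nil || org == nil {
		t.Fatalf("unexpected org result: %v %v", org, err)
	}
}
```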
package identitymocks @@ -21,6 +21,10 @@ type Plugin struct { func (_m *Plugin) Capabilities() *identity.Capabilities { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Capabilities") + } + var r0 *identity.Capabilities if rf, ok := ret.Get(0).(func() *identity.Capabilities); ok { r0 = rf() @@ -37,6 +41,10 @@ func (_m *Plugin) Capabilities() *identity.Capabilities { func (_m *Plugin) Init(ctx context.Context, _a1 config.Section) error { ret := _m.Called(ctx, _a1) + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, config.Section) error); ok { r0 = rf(ctx, _a1) @@ -56,6 +64,10 @@ func (_m *Plugin) InitConfig(_a0 config.Section) { func (_m *Plugin) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -75,6 +87,10 @@ func (_m *Plugin) SetHandler(namespace string, handler identity.Callbacks) { func (_m *Plugin) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -85,13 +101,12 @@ func (_m *Plugin) Start() error { return r0 } -type mockConstructorTestingTNewPlugin interface { +// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPlugin(t interface { mock.TestingT Cleanup(func()) -} - -// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPlugin(t mockConstructorTestingTNewPlugin) *Plugin { +}) *Plugin { mock := &Plugin{} mock.Mock.Test(t) diff --git a/mocks/metricsmocks/manager.go b/mocks/metricsmocks/manager.go index 7ed1e365b6..9e59f8fd49 100644 --- a/mocks/metricsmocks/manager.go +++ b/mocks/metricsmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package metricsmocks @@ -55,6 +55,10 @@ func (_m *Manager) DeleteTime(id string) { func (_m *Manager) GetTime(id string) time.Time { ret := _m.Called(id) + if len(ret) == 0 { + panic("no return value specified for GetTime") + } + var r0 time.Time if rf, ok := ret.Get(0).(func(string) time.Time); ok { r0 = rf(id) @@ -69,6 +73,10 @@ func (_m *Manager) GetTime(id string) time.Time { func (_m *Manager) IsMetricsEnabled() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsMetricsEnabled") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -99,13 +107,12 @@ func (_m *Manager) TransferSubmitted(transfer *core.TokenTransfer) { _m.Called(transfer) } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/multipartymocks/manager.go b/mocks/multipartymocks/manager.go index 58bec46bc4..c968b0bd18 100644 --- a/mocks/multipartymocks/manager.go +++ b/mocks/multipartymocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package multipartymocks @@ -25,6 +25,10 @@ type Manager struct { func (_m *Manager) ConfigureContract(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ConfigureContract") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -39,6 +43,10 @@ func (_m *Manager) ConfigureContract(ctx context.Context) error { func (_m *Manager) GetNetworkVersion() int { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetNetworkVersion") + } + var r0 int if rf, ok := ret.Get(0).(func() int); ok { r0 = rf() @@ -53,6 +61,10 @@ func (_m *Manager) GetNetworkVersion() int { func (_m *Manager) LocalNode() multiparty.LocalNode { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LocalNode") + } + var r0 multiparty.LocalNode if rf, ok := ret.Get(0).(func() multiparty.LocalNode); ok { r0 = rf() @@ -67,6 +79,10 @@ func (_m *Manager) LocalNode() multiparty.LocalNode { func (_m *Manager) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -81,6 +97,10 @@ func (_m *Manager) Name() string { func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for PrepareOperation") + } + var r0 *core.PreparedOperation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Operation) (*core.PreparedOperation, error)); ok { @@ -107,6 +127,10 @@ func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*c func (_m *Manager) RootOrg() multiparty.RootOrg { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RootOrg") + } + var r0 multiparty.RootOrg if rf, ok := ret.Get(0).(func() multiparty.RootOrg); ok { r0 = rf() @@ -118,13 +142,17 @@ func (_m *Manager) RootOrg() multiparty.RootOrg { } // RunOperation provides a mock function with given fields: ctx, op -func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, bool, error) { +func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for RunOperation") + } + var r0 fftypes.JSONObject - var r1 bool + var r1 core.OpPhase var r2 error - if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) (fftypes.JSONObject, bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error)); ok { return rf(ctx, op) } if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) fftypes.JSONObject); ok { @@ -135,10 +163,10 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) } } - if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation) bool); ok { + if rf, ok := 
ret.Get(1).(func(context.Context, *core.PreparedOperation) core.OpPhase); ok { r1 = rf(ctx, op) } else { - r1 = ret.Get(1).(bool) + r1 = ret.Get(1).(core.OpPhase) } if rf, ok := ret.Get(2).(func(context.Context, *core.PreparedOperation) error); ok { @@ -150,13 +178,17 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) return r0, r1, r2 } -// SubmitBatchPin provides a mock function with given fields: ctx, batch, contexts, payloadRef -func (_m *Manager) SubmitBatchPin(ctx context.Context, batch *core.BatchPersisted, contexts []*fftypes.Bytes32, payloadRef string) error { - ret := _m.Called(ctx, batch, contexts, payloadRef) +// SubmitBatchPin provides a mock function with given fields: ctx, batch, contexts, payloadRef, idempotentSubmit +func (_m *Manager) SubmitBatchPin(ctx context.Context, batch *core.BatchPersisted, contexts []*fftypes.Bytes32, payloadRef string, idempotentSubmit bool) error { + ret := _m.Called(ctx, batch, contexts, payloadRef, idempotentSubmit) + + if len(ret) == 0 { + panic("no return value specified for SubmitBatchPin") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *core.BatchPersisted, []*fftypes.Bytes32, string) error); ok { - r0 = rf(ctx, batch, contexts, payloadRef) + if rf, ok := ret.Get(0).(func(context.Context, *core.BatchPersisted, []*fftypes.Bytes32, string, bool) error); ok { + r0 = rf(ctx, batch, contexts, payloadRef, idempotentSubmit) } else { r0 = ret.Error(0) } @@ -164,13 +196,17 @@ func (_m *Manager) SubmitBatchPin(ctx context.Context, batch *core.BatchPersiste return r0 } -// SubmitNetworkAction provides a mock function with given fields: ctx, signingKey, action -func (_m *Manager) SubmitNetworkAction(ctx context.Context, signingKey string, action *core.NetworkAction) error { - ret := _m.Called(ctx, signingKey, action) +// SubmitNetworkAction provides a mock function with given fields: ctx, signingKey, action, idempotentSubmit +func (_m *Manager) SubmitNetworkAction(ctx context.Context, signingKey string, action *core.NetworkAction, idempotentSubmit bool) error { + ret := _m.Called(ctx, signingKey, action, idempotentSubmit) + + if len(ret) == 0 { + panic("no return value specified for SubmitNetworkAction") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, *core.NetworkAction) error); ok { - r0 = rf(ctx, signingKey, action) + if rf, ok := ret.Get(0).(func(context.Context, string, *core.NetworkAction, bool) error); ok { + r0 = rf(ctx, signingKey, action, idempotentSubmit) } else { r0 = ret.Error(0) } @@ -182,6 +218,10 @@ func (_m *Manager) SubmitNetworkAction(ctx context.Context, signingKey string, a func (_m *Manager) TerminateContract(ctx context.Context, location *fftypes.JSONAny, termination *blockchain.Event) error { ret := _m.Called(ctx, location, termination) + if len(ret) == 0 { + panic("no return value specified for TerminateContract") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.JSONAny, *blockchain.Event) error); ok { r0 = rf(ctx, location, termination) @@ -192,13 +232,12 @@ func (_m *Manager) TerminateContract(ctx context.Context, location *fftypes.JSON return r0 } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/namespacemocks/manager.go b/mocks/namespacemocks/manager.go index f3558a4ab8..b28d3e5879 100644 --- a/mocks/namespacemocks/manager.go +++ b/mocks/namespacemocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package namespacemocks @@ -24,6 +24,10 @@ type Manager struct { func (_m *Manager) Authorize(ctx context.Context, authReq *fftypes.AuthReq) error { ret := _m.Called(ctx, authReq) + if len(ret) == 0 { + panic("no return value specified for Authorize") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.AuthReq) error); ok { r0 = rf(ctx, authReq) @@ -38,6 +42,10 @@ func (_m *Manager) Authorize(ctx context.Context, authReq *fftypes.AuthReq) erro func (_m *Manager) GetNamespaces(ctx context.Context, includeInitializing bool) ([]*core.NamespaceWithInitStatus, error) { ret := _m.Called(ctx, includeInitializing) + if len(ret) == 0 { + panic("no return value specified for GetNamespaces") + } + var r0 []*core.NamespaceWithInitStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) ([]*core.NamespaceWithInitStatus, error)); ok { @@ -64,6 +72,10 @@ func (_m *Manager) GetNamespaces(ctx context.Context, includeInitializing bool) func (_m *Manager) GetOperationByNamespacedID(ctx context.Context, nsOpID string) (*core.Operation, error) { ret := _m.Called(ctx, nsOpID) + if len(ret) == 0 { + panic("no return value specified for GetOperationByNamespacedID") + } + var r0 *core.Operation var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Operation, error)); ok { @@ -90,6 +102,10 @@ func (_m *Manager) GetOperationByNamespacedID(ctx context.Context, nsOpID string func (_m *Manager) Init(ctx context.Context, cancelCtx context.CancelFunc, reset chan bool, reloadConfig func() error) error { ret := _m.Called(ctx, cancelCtx, reset, reloadConfig) + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, context.CancelFunc, chan bool, func() error) error); ok { r0 = rf(ctx, cancelCtx, reset, reloadConfig) @@ -104,6 +120,10 @@ func (_m *Manager) Init(ctx context.Context, cancelCtx context.CancelFunc, reset func (_m *Manager) MustOrchestrator(ns string) orchestrator.Orchestrator { ret := _m.Called(ns) + if len(ret) == 0 { + panic("no return value specified for MustOrchestrator") + } + var r0 orchestrator.Orchestrator if rf, ok := ret.Get(0).(func(string) orchestrator.Orchestrator); ok { r0 = rf(ns) @@ -120,6 +140,10 @@ func (_m *Manager) MustOrchestrator(ns string) orchestrator.Orchestrator { func (_m *Manager) Orchestrator(ctx context.Context, ns string, includeInitializing bool) (orchestrator.Orchestrator, error) { ret := _m.Called(ctx, ns, includeInitializing) + if len(ret) == 0 { + panic("no return value specified for Orchestrator") + } + var r0 orchestrator.Orchestrator var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bool) (orchestrator.Orchestrator, error)); ok { @@ -146,6 +170,10 @@ func (_m *Manager) Orchestrator(ctx context.Context, ns string, includeInitializ func (_m *Manager) Reset(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Reset") + } + var r0 error if rf, ok 
:= ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -160,6 +188,10 @@ func (_m *Manager) Reset(ctx context.Context) error { func (_m *Manager) ResolveOperationByNamespacedID(ctx context.Context, nsOpID string, op *core.OperationUpdateDTO) error { ret := _m.Called(ctx, nsOpID, op) + if len(ret) == 0 { + panic("no return value specified for ResolveOperationByNamespacedID") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *core.OperationUpdateDTO) error); ok { r0 = rf(ctx, nsOpID, op) @@ -174,6 +206,10 @@ func (_m *Manager) ResolveOperationByNamespacedID(ctx context.Context, nsOpID st func (_m *Manager) SPIEvents() spievents.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SPIEvents") + } + var r0 spievents.Manager if rf, ok := ret.Get(0).(func() spievents.Manager); ok { r0 = rf() @@ -190,6 +226,10 @@ func (_m *Manager) SPIEvents() spievents.Manager { func (_m *Manager) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -205,13 +245,12 @@ func (_m *Manager) WaitStop() { _m.Called() } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/networkmapmocks/manager.go b/mocks/networkmapmocks/manager.go index 7cca4d5096..178021acc5 100644 --- a/mocks/networkmapmocks/manager.go +++ b/mocks/networkmapmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
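The multiparty manager mock earlier in this diff reflects two interface changes worth calling out when updating tests: `SubmitBatchPin` and `SubmitNetworkAction` gain a trailing `idempotentSubmit bool`, so existing `On(...)` expectations need one more argument matcher, and `RunOperation` now reports a `core.OpPhase` instead of a bare bool. A minimal sketch, assuming the usual firefly and firefly-common import paths; the zero `core.OpPhase` value is used deliberately so the example makes no assumption about the enum's constant names.

```go
package example_test

import (
	"context"
	"testing"

	"github.com/hyperledger/firefly-common/pkg/fftypes"
	"github.com/stretchr/testify/mock"

	"github.com/hyperledger/firefly/mocks/multipartymocks"
	"github.com/hyperledger/firefly/pkg/core"
)

func TestMultipartySignatureChanges(t *testing.T) {
	mmp := multipartymocks.NewManager(t)
	ctx := context.Background()

	// SubmitBatchPin now carries idempotentSubmit as its fifth argument,
	// so the expectation needs a fifth matcher (here: the literal true).
	mmp.On("SubmitBatchPin", mock.Anything, mock.Anything, mock.Anything, "payload-ref", true).
		Return(nil)

	// RunOperation's second return value is now a core.OpPhase, not a bool.
	var phase core.OpPhase // zero value, to avoid assuming constant names
	mmp.On("RunOperation", mock.Anything, mock.Anything).
		Return(fftypes.JSONObject{}, phase, nil)

	if err := mmp.SubmitBatchPin(ctx, &core.BatchPersisted{}, nil, "payload-ref", true); err != nil {
		t.Fatal(err)
	}
	if _, _, err := mmp.RunOperation(ctx, &core.PreparedOperation{}); err != nil {
		t.Fatal(err)
	}
}
```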
package networkmapmocks @@ -22,6 +22,10 @@ type Manager struct { func (_m *Manager) GetDIDDocForIndentityByDID(ctx context.Context, did string) (*networkmap.DIDDocument, error) { ret := _m.Called(ctx, did) + if len(ret) == 0 { + panic("no return value specified for GetDIDDocForIndentityByDID") + } + var r0 *networkmap.DIDDocument var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*networkmap.DIDDocument, error)); ok { @@ -48,6 +52,10 @@ func (_m *Manager) GetDIDDocForIndentityByDID(ctx context.Context, did string) ( func (_m *Manager) GetDIDDocForIndentityByID(ctx context.Context, id string) (*networkmap.DIDDocument, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetDIDDocForIndentityByID") + } + var r0 *networkmap.DIDDocument var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*networkmap.DIDDocument, error)); ok { @@ -74,6 +82,10 @@ func (_m *Manager) GetDIDDocForIndentityByID(ctx context.Context, id string) (*n func (_m *Manager) GetIdentities(ctx context.Context, filter ffapi.AndFilter) ([]*core.Identity, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetIdentities") + } + var r0 []*core.Identity var r1 *ffapi.FilterResult var r2 error @@ -109,6 +121,10 @@ func (_m *Manager) GetIdentities(ctx context.Context, filter ffapi.AndFilter) ([ func (_m *Manager) GetIdentitiesWithVerifiers(ctx context.Context, filter ffapi.AndFilter) ([]*core.IdentityWithVerifiers, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetIdentitiesWithVerifiers") + } + var r0 []*core.IdentityWithVerifiers var r1 *ffapi.FilterResult var r2 error @@ -144,6 +160,10 @@ func (_m *Manager) GetIdentitiesWithVerifiers(ctx context.Context, filter ffapi. 
func (_m *Manager) GetIdentityByDID(ctx context.Context, did string) (*core.Identity, error) { ret := _m.Called(ctx, did) + if len(ret) == 0 { + panic("no return value specified for GetIdentityByDID") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Identity, error)); ok { @@ -170,6 +190,10 @@ func (_m *Manager) GetIdentityByDID(ctx context.Context, did string) (*core.Iden func (_m *Manager) GetIdentityByDIDWithVerifiers(ctx context.Context, did string) (*core.IdentityWithVerifiers, error) { ret := _m.Called(ctx, did) + if len(ret) == 0 { + panic("no return value specified for GetIdentityByDIDWithVerifiers") + } + var r0 *core.IdentityWithVerifiers var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.IdentityWithVerifiers, error)); ok { @@ -196,6 +220,10 @@ func (_m *Manager) GetIdentityByDIDWithVerifiers(ctx context.Context, did string func (_m *Manager) GetIdentityByID(ctx context.Context, id string) (*core.Identity, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetIdentityByID") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Identity, error)); ok { @@ -222,6 +250,10 @@ func (_m *Manager) GetIdentityByID(ctx context.Context, id string) (*core.Identi func (_m *Manager) GetIdentityByIDWithVerifiers(ctx context.Context, id string) (*core.IdentityWithVerifiers, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetIdentityByIDWithVerifiers") + } + var r0 *core.IdentityWithVerifiers var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.IdentityWithVerifiers, error)); ok { @@ -248,6 +280,10 @@ func (_m *Manager) GetIdentityByIDWithVerifiers(ctx context.Context, id string) func (_m *Manager) GetIdentityVerifiers(ctx context.Context, id string, filter ffapi.AndFilter) ([]*core.Verifier, *ffapi.FilterResult, error) { ret := _m.Called(ctx, id, filter) + if len(ret) == 0 { + panic("no return value specified for GetIdentityVerifiers") + } + var r0 []*core.Verifier var r1 *ffapi.FilterResult var r2 error @@ -283,6 +319,10 @@ func (_m *Manager) GetIdentityVerifiers(ctx context.Context, id string, filter f func (_m *Manager) GetNodeByNameOrID(ctx context.Context, nameOrID string) (*core.Identity, error) { ret := _m.Called(ctx, nameOrID) + if len(ret) == 0 { + panic("no return value specified for GetNodeByNameOrID") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Identity, error)); ok { @@ -309,6 +349,10 @@ func (_m *Manager) GetNodeByNameOrID(ctx context.Context, nameOrID string) (*cor func (_m *Manager) GetNodes(ctx context.Context, filter ffapi.AndFilter) ([]*core.Identity, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetNodes") + } + var r0 []*core.Identity var r1 *ffapi.FilterResult var r2 error @@ -344,6 +388,10 @@ func (_m *Manager) GetNodes(ctx context.Context, filter ffapi.AndFilter) ([]*cor func (_m *Manager) GetOrganizationByNameOrID(ctx context.Context, nameOrID string) (*core.Identity, error) { ret := _m.Called(ctx, nameOrID) + if len(ret) == 0 { + panic("no return value specified for GetOrganizationByNameOrID") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Identity, error)); ok { @@ -370,6 +418,10 @@ func (_m *Manager) GetOrganizationByNameOrID(ctx context.Context, 
nameOrID strin func (_m *Manager) GetOrganizations(ctx context.Context, filter ffapi.AndFilter) ([]*core.Identity, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetOrganizations") + } + var r0 []*core.Identity var r1 *ffapi.FilterResult var r2 error @@ -405,6 +457,10 @@ func (_m *Manager) GetOrganizations(ctx context.Context, filter ffapi.AndFilter) func (_m *Manager) GetOrganizationsWithVerifiers(ctx context.Context, filter ffapi.AndFilter) ([]*core.IdentityWithVerifiers, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetOrganizationsWithVerifiers") + } + var r0 []*core.IdentityWithVerifiers var r1 *ffapi.FilterResult var r2 error @@ -440,6 +496,10 @@ func (_m *Manager) GetOrganizationsWithVerifiers(ctx context.Context, filter ffa func (_m *Manager) GetVerifierByHash(ctx context.Context, hash string) (*core.Verifier, error) { ret := _m.Called(ctx, hash) + if len(ret) == 0 { + panic("no return value specified for GetVerifierByHash") + } + var r0 *core.Verifier var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Verifier, error)); ok { @@ -466,6 +526,10 @@ func (_m *Manager) GetVerifierByHash(ctx context.Context, hash string) (*core.Ve func (_m *Manager) GetVerifiers(ctx context.Context, filter ffapi.AndFilter) ([]*core.Verifier, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetVerifiers") + } + var r0 []*core.Verifier var r1 *ffapi.FilterResult var r2 error @@ -501,6 +565,10 @@ func (_m *Manager) GetVerifiers(ctx context.Context, filter ffapi.AndFilter) ([] func (_m *Manager) RegisterIdentity(ctx context.Context, dto *core.IdentityCreateDTO, waitConfirm bool) (*core.Identity, error) { ret := _m.Called(ctx, dto, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for RegisterIdentity") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.IdentityCreateDTO, bool) (*core.Identity, error)); ok { @@ -527,6 +595,10 @@ func (_m *Manager) RegisterIdentity(ctx context.Context, dto *core.IdentityCreat func (_m *Manager) RegisterNode(ctx context.Context, waitConfirm bool) (*core.Identity, error) { ret := _m.Called(ctx, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for RegisterNode") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (*core.Identity, error)); ok { @@ -553,6 +625,10 @@ func (_m *Manager) RegisterNode(ctx context.Context, waitConfirm bool) (*core.Id func (_m *Manager) RegisterNodeOrganization(ctx context.Context, waitConfirm bool) (*core.Identity, error) { ret := _m.Called(ctx, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for RegisterNodeOrganization") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (*core.Identity, error)); ok { @@ -579,6 +655,10 @@ func (_m *Manager) RegisterNodeOrganization(ctx context.Context, waitConfirm boo func (_m *Manager) RegisterOrganization(ctx context.Context, org *core.IdentityCreateDTO, waitConfirm bool) (*core.Identity, error) { ret := _m.Called(ctx, org, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for RegisterOrganization") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.IdentityCreateDTO, bool) (*core.Identity, error)); ok { @@ -605,6 +685,10 @@ func 
(_m *Manager) RegisterOrganization(ctx context.Context, org *core.IdentityC func (_m *Manager) UpdateIdentity(ctx context.Context, id string, dto *core.IdentityUpdateDTO, waitConfirm bool) (*core.Identity, error) { ret := _m.Called(ctx, id, dto, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for UpdateIdentity") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *core.IdentityUpdateDTO, bool) (*core.Identity, error)); ok { @@ -627,13 +711,12 @@ func (_m *Manager) UpdateIdentity(ctx context.Context, id string, dto *core.Iden return r0, r1 } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/operationmocks/manager.go b/mocks/operationmocks/manager.go index b81651db22..7035ec6b9a 100644 --- a/mocks/operationmocks/manager.go +++ b/mocks/operationmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package operationmocks @@ -31,6 +31,10 @@ func (_m *Manager) AddOrReuseOperation(ctx context.Context, op *core.Operation, _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for AddOrReuseOperation") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Operation, ...database.PostCompletionHook) error); ok { r0 = rf(ctx, op, hooks...) @@ -41,10 +45,39 @@ func (_m *Manager) AddOrReuseOperation(ctx context.Context, op *core.Operation, return r0 } +// BulkInsertOperations provides a mock function with given fields: ctx, ops +func (_m *Manager) BulkInsertOperations(ctx context.Context, ops ...*core.Operation) error { + _va := make([]interface{}, len(ops)) + for _i := range ops { + _va[_i] = ops[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for BulkInsertOperations") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, ...*core.Operation) error); ok { + r0 = rf(ctx, ops...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + // GetOperationByIDCached provides a mock function with given fields: ctx, opID func (_m *Manager) GetOperationByIDCached(ctx context.Context, opID *fftypes.UUID) (*core.Operation, error) { ret := _m.Called(ctx, opID) + if len(ret) == 0 { + panic("no return value specified for GetOperationByIDCached") + } + var r0 *core.Operation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (*core.Operation, error)); ok { @@ -71,6 +104,10 @@ func (_m *Manager) GetOperationByIDCached(ctx context.Context, opID *fftypes.UUI func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for PrepareOperation") + } + var r0 *core.PreparedOperation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Operation) (*core.PreparedOperation, error)); ok { @@ -102,6 +139,10 @@ func (_m *Manager) RegisterHandler(ctx context.Context, handler operations.Opera func (_m *Manager) ResolveOperationByID(ctx context.Context, opID *fftypes.UUID, op *core.OperationUpdateDTO) error { ret := _m.Called(ctx, opID, op) + if len(ret) == 0 { + panic("no return value specified for ResolveOperationByID") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, *core.OperationUpdateDTO) error); ok { r0 = rf(ctx, opID, op) @@ -113,35 +154,50 @@ func (_m *Manager) ResolveOperationByID(ctx context.Context, opID *fftypes.UUID, } // ResubmitOperations provides a mock function with given fields: ctx, txID -func (_m *Manager) ResubmitOperations(ctx context.Context, txID *fftypes.UUID) (*core.Operation, error) { +func (_m *Manager) ResubmitOperations(ctx context.Context, txID *fftypes.UUID) (int, []*core.Operation, error) { ret := _m.Called(ctx, txID) - var r0 *core.Operation - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (*core.Operation, error)); ok { + if len(ret) == 0 { + panic("no return value specified for ResubmitOperations") + } + + var r0 int + var r1 []*core.Operation + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (int, []*core.Operation, error)); ok { return rf(ctx, txID) } - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) *core.Operation); ok { + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) int); ok { r0 = rf(ctx, txID) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*core.Operation) - } + r0 = ret.Get(0).(int) } - if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID) []*core.Operation); ok { r1 = rf(ctx, txID) } else { - r1 = ret.Error(1) + if ret.Get(1) != nil { + r1 = ret.Get(1).([]*core.Operation) + } } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.UUID) error); ok { + r2 = rf(ctx, txID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // RetryOperation provides a mock function with given fields: ctx, opID func (_m *Manager) RetryOperation(ctx context.Context, opID *fftypes.UUID) (*core.Operation, error) { ret := _m.Called(ctx, opID) + if len(ret) == 0 { + panic("no return value specified for RetryOperation") + } + var r0 *core.Operation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (*core.Operation, error)); ok { @@ -164,32 +220,29 @@ func (_m *Manager) RetryOperation(ctx context.Context, opID *fftypes.UUID) (*cor return r0, r1 } -// 
RunOperation provides a mock function with given fields: ctx, op, options -func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation, options ...operations.RunOperationOption) (fftypes.JSONObject, error) { - _va := make([]interface{}, len(options)) - for _i := range options { - _va[_i] = options[_i] +// RunOperation provides a mock function with given fields: ctx, op, idempotentSubmit +func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation, idempotentSubmit bool) (fftypes.JSONObject, error) { + ret := _m.Called(ctx, op, idempotentSubmit) + + if len(ret) == 0 { + panic("no return value specified for RunOperation") } - var _ca []interface{} - _ca = append(_ca, ctx, op) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) var r0 fftypes.JSONObject var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation, ...operations.RunOperationOption) (fftypes.JSONObject, error)); ok { - return rf(ctx, op, options...) + if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation, bool) (fftypes.JSONObject, error)); ok { + return rf(ctx, op, idempotentSubmit) } - if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation, ...operations.RunOperationOption) fftypes.JSONObject); ok { - r0 = rf(ctx, op, options...) + if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation, bool) fftypes.JSONObject); ok { + r0 = rf(ctx, op, idempotentSubmit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(fftypes.JSONObject) } } - if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation, ...operations.RunOperationOption) error); ok { - r1 = rf(ctx, op, options...) + if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation, bool) error); ok { + r1 = rf(ctx, op, idempotentSubmit) } else { r1 = ret.Error(1) } @@ -201,6 +254,10 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation, func (_m *Manager) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -221,13 +278,12 @@ func (_m *Manager) WaitStop() { _m.Called() } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/orchestratormocks/orchestrator.go b/mocks/orchestratormocks/orchestrator.go index c876473a53..0bb4fb57d8 100644 --- a/mocks/orchestratormocks/orchestrator.go +++ b/mocks/orchestratormocks/orchestrator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
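The operations manager mock just above captures three interface changes that ripple through test code: `RunOperation` swaps its variadic `RunOperationOption` list for an explicit `idempotentSubmit bool`, `ResubmitOperations` now returns an int alongside the slice of resubmitted operations (the diff shows only the signature, not the int's meaning), and a variadic `BulkInsertOperations` is added. A hedged sketch of updated stubs, with import paths assumed and concrete values illustrative only:

```go
package example_test

import (
	"context"
	"testing"

	"github.com/hyperledger/firefly-common/pkg/fftypes"
	"github.com/stretchr/testify/mock"

	"github.com/hyperledger/firefly/mocks/operationmocks"
	"github.com/hyperledger/firefly/pkg/core"
)

func TestOperationsManagerStubs(t *testing.T) {
	mom := operationmocks.NewManager(t)
	ctx := context.Background()
	txID := fftypes.NewUUID()

	// RunOperation now takes a fixed idempotentSubmit bool instead of
	// variadic options, so the matcher list has a fixed length.
	mom.On("RunOperation", mock.Anything, mock.Anything, true).
		Return(fftypes.JSONObject{}, nil)

	// ResubmitOperations now returns an int alongside the operations slice.
	mom.On("ResubmitOperations", mock.Anything, txID).
		Return(1, []*core.Operation{{}}, nil)

	if _, err := mom.RunOperation(ctx, &core.PreparedOperation{}, true); err != nil {
		t.Fatal(err)
	}
	total, resubmitted, err := mom.ResubmitOperations(ctx, txID)
	if err != nil || total != 1 || len(resubmitted) != 1 {
		t.Fatalf("unexpected resubmit result: %d %d %v", total, len(resubmitted), err)
	}
}
```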
package orchestratormocks @@ -48,6 +48,10 @@ type Orchestrator struct { func (_m *Orchestrator) Assets() assets.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Assets") + } + var r0 assets.Manager if rf, ok := ret.Get(0).(func() assets.Manager); ok { r0 = rf() @@ -64,6 +68,10 @@ func (_m *Orchestrator) Assets() assets.Manager { func (_m *Orchestrator) Authorize(ctx context.Context, authReq *fftypes.AuthReq) error { ret := _m.Called(ctx, authReq) + if len(ret) == 0 { + panic("no return value specified for Authorize") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.AuthReq) error); ok { r0 = rf(ctx, authReq) @@ -78,6 +86,10 @@ func (_m *Orchestrator) Authorize(ctx context.Context, authReq *fftypes.AuthReq) func (_m *Orchestrator) BatchManager() batch.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for BatchManager") + } + var r0 batch.Manager if rf, ok := ret.Get(0).(func() batch.Manager); ok { r0 = rf() @@ -94,6 +106,10 @@ func (_m *Orchestrator) BatchManager() batch.Manager { func (_m *Orchestrator) Broadcast() broadcast.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Broadcast") + } + var r0 broadcast.Manager if rf, ok := ret.Get(0).(func() broadcast.Manager); ok { r0 = rf() @@ -110,6 +126,10 @@ func (_m *Orchestrator) Broadcast() broadcast.Manager { func (_m *Orchestrator) Contracts() contracts.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Contracts") + } + var r0 contracts.Manager if rf, ok := ret.Get(0).(func() contracts.Manager); ok { r0 = rf() @@ -126,6 +146,10 @@ func (_m *Orchestrator) Contracts() contracts.Manager { func (_m *Orchestrator) CreateSubscription(ctx context.Context, subDef *core.Subscription) (*core.Subscription, error) { ret := _m.Called(ctx, subDef) + if len(ret) == 0 { + panic("no return value specified for CreateSubscription") + } + var r0 *core.Subscription var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Subscription) (*core.Subscription, error)); ok { @@ -152,6 +176,10 @@ func (_m *Orchestrator) CreateSubscription(ctx context.Context, subDef *core.Sub func (_m *Orchestrator) CreateUpdateSubscription(ctx context.Context, subDef *core.Subscription) (*core.Subscription, error) { ret := _m.Called(ctx, subDef) + if len(ret) == 0 { + panic("no return value specified for CreateUpdateSubscription") + } + var r0 *core.Subscription var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Subscription) (*core.Subscription, error)); ok { @@ -178,6 +206,10 @@ func (_m *Orchestrator) CreateUpdateSubscription(ctx context.Context, subDef *co func (_m *Orchestrator) Data() data.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Data") + } + var r0 data.Manager if rf, ok := ret.Get(0).(func() data.Manager); ok { r0 = rf() @@ -194,6 +226,10 @@ func (_m *Orchestrator) Data() data.Manager { func (_m *Orchestrator) DefinitionSender() definitions.Sender { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for DefinitionSender") + } + var r0 definitions.Sender if rf, ok := ret.Get(0).(func() definitions.Sender); ok { r0 = rf() @@ -210,6 +246,10 @@ func (_m *Orchestrator) DefinitionSender() definitions.Sender { func (_m *Orchestrator) DeleteSubscription(ctx context.Context, id string) error { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for DeleteSubscription") + } + var r0 
error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, id) @@ -224,6 +264,10 @@ func (_m *Orchestrator) DeleteSubscription(ctx context.Context, id string) error func (_m *Orchestrator) Events() events.EventManager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Events") + } + var r0 events.EventManager if rf, ok := ret.Get(0).(func() events.EventManager); ok { r0 = rf() @@ -240,6 +284,10 @@ func (_m *Orchestrator) Events() events.EventManager { func (_m *Orchestrator) GetBatchByID(ctx context.Context, id string) (*core.BatchPersisted, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetBatchByID") + } + var r0 *core.BatchPersisted var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.BatchPersisted, error)); ok { @@ -266,6 +314,10 @@ func (_m *Orchestrator) GetBatchByID(ctx context.Context, id string) (*core.Batc func (_m *Orchestrator) GetBatches(ctx context.Context, filter ffapi.AndFilter) ([]*core.BatchPersisted, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetBatches") + } + var r0 []*core.BatchPersisted var r1 *ffapi.FilterResult var r2 error @@ -301,6 +353,10 @@ func (_m *Orchestrator) GetBatches(ctx context.Context, filter ffapi.AndFilter) func (_m *Orchestrator) GetBlockchainEventByID(ctx context.Context, id string) (*core.BlockchainEvent, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetBlockchainEventByID") + } + var r0 *core.BlockchainEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.BlockchainEvent, error)); ok { @@ -327,6 +383,10 @@ func (_m *Orchestrator) GetBlockchainEventByID(ctx context.Context, id string) ( func (_m *Orchestrator) GetBlockchainEvents(ctx context.Context, filter ffapi.AndFilter) ([]*core.BlockchainEvent, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetBlockchainEvents") + } + var r0 []*core.BlockchainEvent var r1 *ffapi.FilterResult var r2 error @@ -362,6 +422,10 @@ func (_m *Orchestrator) GetBlockchainEvents(ctx context.Context, filter ffapi.An func (_m *Orchestrator) GetChartHistogram(ctx context.Context, startTime int64, endTime int64, buckets int64, tableName database.CollectionName) ([]*core.ChartHistogram, error) { ret := _m.Called(ctx, startTime, endTime, buckets, tableName) + if len(ret) == 0 { + panic("no return value specified for GetChartHistogram") + } + var r0 []*core.ChartHistogram var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, int64, int64, database.CollectionName) ([]*core.ChartHistogram, error)); ok { @@ -388,6 +452,10 @@ func (_m *Orchestrator) GetChartHistogram(ctx context.Context, startTime int64, func (_m *Orchestrator) GetData(ctx context.Context, filter ffapi.AndFilter) (core.DataArray, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetData") + } + var r0 core.DataArray var r1 *ffapi.FilterResult var r2 error @@ -423,6 +491,10 @@ func (_m *Orchestrator) GetData(ctx context.Context, filter ffapi.AndFilter) (co func (_m *Orchestrator) GetDataByID(ctx context.Context, id string) (*core.Data, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetDataByID") + } + var r0 *core.Data var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) 
(*core.Data, error)); ok { @@ -449,6 +521,10 @@ func (_m *Orchestrator) GetDataByID(ctx context.Context, id string) (*core.Data, func (_m *Orchestrator) GetDataSubPaths(ctx context.Context, path string) ([]string, error) { ret := _m.Called(ctx, path) + if len(ret) == 0 { + panic("no return value specified for GetDataSubPaths") + } + var r0 []string var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) ([]string, error)); ok { @@ -475,6 +551,10 @@ func (_m *Orchestrator) GetDataSubPaths(ctx context.Context, path string) ([]str func (_m *Orchestrator) GetDatatypeByID(ctx context.Context, id string) (*core.Datatype, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetDatatypeByID") + } + var r0 *core.Datatype var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Datatype, error)); ok { @@ -501,6 +581,10 @@ func (_m *Orchestrator) GetDatatypeByID(ctx context.Context, id string) (*core.D func (_m *Orchestrator) GetDatatypeByName(ctx context.Context, name string, version string) (*core.Datatype, error) { ret := _m.Called(ctx, name, version) + if len(ret) == 0 { + panic("no return value specified for GetDatatypeByName") + } + var r0 *core.Datatype var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string) (*core.Datatype, error)); ok { @@ -527,6 +611,10 @@ func (_m *Orchestrator) GetDatatypeByName(ctx context.Context, name string, vers func (_m *Orchestrator) GetDatatypes(ctx context.Context, filter ffapi.AndFilter) ([]*core.Datatype, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetDatatypes") + } + var r0 []*core.Datatype var r1 *ffapi.FilterResult var r2 error @@ -562,6 +650,10 @@ func (_m *Orchestrator) GetDatatypes(ctx context.Context, filter ffapi.AndFilter func (_m *Orchestrator) GetEventByID(ctx context.Context, id string) (*core.Event, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetEventByID") + } + var r0 *core.Event var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Event, error)); ok { @@ -588,6 +680,10 @@ func (_m *Orchestrator) GetEventByID(ctx context.Context, id string) (*core.Even func (_m *Orchestrator) GetEventByIDWithReference(ctx context.Context, id string) (*core.EnrichedEvent, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetEventByIDWithReference") + } + var r0 *core.EnrichedEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.EnrichedEvent, error)); ok { @@ -614,6 +710,10 @@ func (_m *Orchestrator) GetEventByIDWithReference(ctx context.Context, id string func (_m *Orchestrator) GetEvents(ctx context.Context, filter ffapi.AndFilter) ([]*core.Event, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetEvents") + } + var r0 []*core.Event var r1 *ffapi.FilterResult var r2 error @@ -649,6 +749,10 @@ func (_m *Orchestrator) GetEvents(ctx context.Context, filter ffapi.AndFilter) ( func (_m *Orchestrator) GetEventsWithReferences(ctx context.Context, filter ffapi.AndFilter) ([]*core.EnrichedEvent, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetEventsWithReferences") + } + var r0 []*core.EnrichedEvent var r1 *ffapi.FilterResult var r2 error @@ -684,6 +788,10 @@ func (_m *Orchestrator) GetEventsWithReferences(ctx 
context.Context, filter ffap func (_m *Orchestrator) GetMessageByID(ctx context.Context, id string) (*core.Message, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetMessageByID") + } + var r0 *core.Message var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Message, error)); ok { @@ -710,6 +818,10 @@ func (_m *Orchestrator) GetMessageByID(ctx context.Context, id string) (*core.Me func (_m *Orchestrator) GetMessageByIDWithData(ctx context.Context, id string) (*core.MessageInOut, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetMessageByIDWithData") + } + var r0 *core.MessageInOut var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.MessageInOut, error)); ok { @@ -736,6 +848,10 @@ func (_m *Orchestrator) GetMessageByIDWithData(ctx context.Context, id string) ( func (_m *Orchestrator) GetMessageData(ctx context.Context, id string) (core.DataArray, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetMessageData") + } + var r0 core.DataArray var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (core.DataArray, error)); ok { @@ -762,6 +878,10 @@ func (_m *Orchestrator) GetMessageData(ctx context.Context, id string) (core.Dat func (_m *Orchestrator) GetMessageEvents(ctx context.Context, id string, filter ffapi.AndFilter) ([]*core.Event, *ffapi.FilterResult, error) { ret := _m.Called(ctx, id, filter) + if len(ret) == 0 { + panic("no return value specified for GetMessageEvents") + } + var r0 []*core.Event var r1 *ffapi.FilterResult var r2 error @@ -797,6 +917,10 @@ func (_m *Orchestrator) GetMessageEvents(ctx context.Context, id string, filter func (_m *Orchestrator) GetMessageTransaction(ctx context.Context, id string) (*core.Transaction, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetMessageTransaction") + } + var r0 *core.Transaction var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Transaction, error)); ok { @@ -823,6 +947,10 @@ func (_m *Orchestrator) GetMessageTransaction(ctx context.Context, id string) (* func (_m *Orchestrator) GetMessages(ctx context.Context, filter ffapi.AndFilter) ([]*core.Message, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetMessages") + } + var r0 []*core.Message var r1 *ffapi.FilterResult var r2 error @@ -858,6 +986,10 @@ func (_m *Orchestrator) GetMessages(ctx context.Context, filter ffapi.AndFilter) func (_m *Orchestrator) GetMessagesForData(ctx context.Context, dataID string, filter ffapi.AndFilter) ([]*core.Message, *ffapi.FilterResult, error) { ret := _m.Called(ctx, dataID, filter) + if len(ret) == 0 { + panic("no return value specified for GetMessagesForData") + } + var r0 []*core.Message var r1 *ffapi.FilterResult var r2 error @@ -893,6 +1025,10 @@ func (_m *Orchestrator) GetMessagesForData(ctx context.Context, dataID string, f func (_m *Orchestrator) GetMessagesWithData(ctx context.Context, filter ffapi.AndFilter) ([]*core.MessageInOut, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetMessagesWithData") + } + var r0 []*core.MessageInOut var r1 *ffapi.FilterResult var r2 error @@ -928,6 +1064,10 @@ func (_m *Orchestrator) GetMessagesWithData(ctx context.Context, filter ffapi.An func (_m *Orchestrator) GetNamespace(ctx context.Context) 
*core.Namespace { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetNamespace") + } + var r0 *core.Namespace if rf, ok := ret.Get(0).(func(context.Context) *core.Namespace); ok { r0 = rf(ctx) @@ -944,6 +1084,10 @@ func (_m *Orchestrator) GetNamespace(ctx context.Context) *core.Namespace { func (_m *Orchestrator) GetNextPins(ctx context.Context, filter ffapi.AndFilter) ([]*core.NextPin, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetNextPins") + } + var r0 []*core.NextPin var r1 *ffapi.FilterResult var r2 error @@ -979,6 +1123,10 @@ func (_m *Orchestrator) GetNextPins(ctx context.Context, filter ffapi.AndFilter) func (_m *Orchestrator) GetOperationByID(ctx context.Context, id string) (*core.Operation, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetOperationByID") + } + var r0 *core.Operation var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Operation, error)); ok { @@ -1005,6 +1153,10 @@ func (_m *Orchestrator) GetOperationByID(ctx context.Context, id string) (*core. func (_m *Orchestrator) GetOperationByIDWithStatus(ctx context.Context, id string) (*core.OperationWithDetail, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetOperationByIDWithStatus") + } + var r0 *core.OperationWithDetail var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.OperationWithDetail, error)); ok { @@ -1031,6 +1183,10 @@ func (_m *Orchestrator) GetOperationByIDWithStatus(ctx context.Context, id strin func (_m *Orchestrator) GetOperations(ctx context.Context, filter ffapi.AndFilter) ([]*core.Operation, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetOperations") + } + var r0 []*core.Operation var r1 *ffapi.FilterResult var r2 error @@ -1066,6 +1222,10 @@ func (_m *Orchestrator) GetOperations(ctx context.Context, filter ffapi.AndFilte func (_m *Orchestrator) GetPins(ctx context.Context, filter ffapi.AndFilter) ([]*core.Pin, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetPins") + } + var r0 []*core.Pin var r1 *ffapi.FilterResult var r2 error @@ -1101,6 +1261,10 @@ func (_m *Orchestrator) GetPins(ctx context.Context, filter ffapi.AndFilter) ([] func (_m *Orchestrator) GetStatus(ctx context.Context) (*core.NamespaceStatus, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetStatus") + } + var r0 *core.NamespaceStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*core.NamespaceStatus, error)); ok { @@ -1127,6 +1291,10 @@ func (_m *Orchestrator) GetStatus(ctx context.Context) (*core.NamespaceStatus, e func (_m *Orchestrator) GetSubscriptionByID(ctx context.Context, id string) (*core.Subscription, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetSubscriptionByID") + } + var r0 *core.Subscription var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Subscription, error)); ok { @@ -1153,6 +1321,10 @@ func (_m *Orchestrator) GetSubscriptionByID(ctx context.Context, id string) (*co func (_m *Orchestrator) GetSubscriptionByIDWithStatus(ctx context.Context, id string) (*core.SubscriptionWithStatus, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for 
GetSubscriptionByIDWithStatus") + } + var r0 *core.SubscriptionWithStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.SubscriptionWithStatus, error)); ok { @@ -1179,6 +1351,10 @@ func (_m *Orchestrator) GetSubscriptionByIDWithStatus(ctx context.Context, id st func (_m *Orchestrator) GetSubscriptions(ctx context.Context, filter ffapi.AndFilter) ([]*core.Subscription, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetSubscriptions") + } + var r0 []*core.Subscription var r1 *ffapi.FilterResult var r2 error @@ -1214,6 +1390,10 @@ func (_m *Orchestrator) GetSubscriptions(ctx context.Context, filter ffapi.AndFi func (_m *Orchestrator) GetTransactionBlockchainEvents(ctx context.Context, id string) ([]*core.BlockchainEvent, *ffapi.FilterResult, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetTransactionBlockchainEvents") + } + var r0 []*core.BlockchainEvent var r1 *ffapi.FilterResult var r2 error @@ -1249,6 +1429,10 @@ func (_m *Orchestrator) GetTransactionBlockchainEvents(ctx context.Context, id s func (_m *Orchestrator) GetTransactionByID(ctx context.Context, id string) (*core.Transaction, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetTransactionByID") + } + var r0 *core.Transaction var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Transaction, error)); ok { @@ -1275,6 +1459,10 @@ func (_m *Orchestrator) GetTransactionByID(ctx context.Context, id string) (*cor func (_m *Orchestrator) GetTransactionOperations(ctx context.Context, id string) ([]*core.Operation, *ffapi.FilterResult, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetTransactionOperations") + } + var r0 []*core.Operation var r1 *ffapi.FilterResult var r2 error @@ -1310,6 +1498,10 @@ func (_m *Orchestrator) GetTransactionOperations(ctx context.Context, id string) func (_m *Orchestrator) GetTransactionStatus(ctx context.Context, id string) (*core.TransactionStatus, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetTransactionStatus") + } + var r0 *core.TransactionStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.TransactionStatus, error)); ok { @@ -1336,6 +1528,10 @@ func (_m *Orchestrator) GetTransactionStatus(ctx context.Context, id string) (*c func (_m *Orchestrator) GetTransactions(ctx context.Context, filter ffapi.AndFilter) ([]*core.Transaction, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetTransactions") + } + var r0 []*core.Transaction var r1 *ffapi.FilterResult var r2 error @@ -1371,6 +1567,10 @@ func (_m *Orchestrator) GetTransactions(ctx context.Context, filter ffapi.AndFil func (_m *Orchestrator) Identity() identity.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Identity") + } + var r0 identity.Manager if rf, ok := ret.Get(0).(func() identity.Manager); ok { r0 = rf() @@ -1387,6 +1587,10 @@ func (_m *Orchestrator) Identity() identity.Manager { func (_m *Orchestrator) Init() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -1401,6 +1605,10 @@ func (_m *Orchestrator) Init() error { func (_m *Orchestrator) MultiParty() multiparty.Manager { 
ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for MultiParty") + } + var r0 multiparty.Manager if rf, ok := ret.Get(0).(func() multiparty.Manager); ok { r0 = rf() @@ -1417,6 +1625,10 @@ func (_m *Orchestrator) MultiParty() multiparty.Manager { func (_m *Orchestrator) NetworkMap() networkmap.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NetworkMap") + } + var r0 networkmap.Manager if rf, ok := ret.Get(0).(func() networkmap.Manager); ok { r0 = rf() @@ -1433,6 +1645,10 @@ func (_m *Orchestrator) NetworkMap() networkmap.Manager { func (_m *Orchestrator) Operations() operations.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Operations") + } + var r0 operations.Manager if rf, ok := ret.Get(0).(func() operations.Manager); ok { r0 = rf() @@ -1454,6 +1670,10 @@ func (_m *Orchestrator) PreInit(ctx context.Context, cancelCtx context.CancelFun func (_m *Orchestrator) PrivateMessaging() privatemessaging.Manager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for PrivateMessaging") + } + var r0 privatemessaging.Manager if rf, ok := ret.Get(0).(func() privatemessaging.Manager); ok { r0 = rf() @@ -1470,6 +1690,10 @@ func (_m *Orchestrator) PrivateMessaging() privatemessaging.Manager { func (_m *Orchestrator) RequestReply(ctx context.Context, msg *core.MessageInOut) (*core.MessageInOut, error) { ret := _m.Called(ctx, msg) + if len(ret) == 0 { + panic("no return value specified for RequestReply") + } + var r0 *core.MessageInOut var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.MessageInOut) (*core.MessageInOut, error)); ok { @@ -1496,6 +1720,10 @@ func (_m *Orchestrator) RequestReply(ctx context.Context, msg *core.MessageInOut func (_m *Orchestrator) RewindPins(ctx context.Context, rewind *core.PinRewind) (*core.PinRewind, error) { ret := _m.Called(ctx, rewind) + if len(ret) == 0 { + panic("no return value specified for RewindPins") + } + var r0 *core.PinRewind var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.PinRewind) (*core.PinRewind, error)); ok { @@ -1522,6 +1750,10 @@ func (_m *Orchestrator) RewindPins(ctx context.Context, rewind *core.PinRewind) func (_m *Orchestrator) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -1536,6 +1768,10 @@ func (_m *Orchestrator) Start() error { func (_m *Orchestrator) SubmitNetworkAction(ctx context.Context, action *core.NetworkAction) error { ret := _m.Called(ctx, action) + if len(ret) == 0 { + panic("no return value specified for SubmitNetworkAction") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.NetworkAction) error); ok { r0 = rf(ctx, action) @@ -1551,13 +1787,12 @@ func (_m *Orchestrator) WaitStop() { _m.Called() } -type mockConstructorTestingTNewOrchestrator interface { +// NewOrchestrator creates a new instance of Orchestrator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOrchestrator(t interface { mock.TestingT Cleanup(func()) -} - -// NewOrchestrator creates a new instance of Orchestrator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewOrchestrator(t mockConstructorTestingTNewOrchestrator) *Orchestrator { +}) *Orchestrator { mock := &Orchestrator{} mock.Mock.Test(t) diff --git a/mocks/privatemessagingmocks/manager.go b/mocks/privatemessagingmocks/manager.go index 3617b1f477..1d051d1f63 100644 --- a/mocks/privatemessagingmocks/manager.go +++ b/mocks/privatemessagingmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package privatemessagingmocks @@ -24,6 +24,10 @@ type Manager struct { func (_m *Manager) EnsureLocalGroup(ctx context.Context, group *core.Group, creator *core.Member) (bool, error) { ret := _m.Called(ctx, group, creator) + if len(ret) == 0 { + panic("no return value specified for EnsureLocalGroup") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Group, *core.Member) (bool, error)); ok { @@ -48,6 +52,10 @@ func (_m *Manager) EnsureLocalGroup(ctx context.Context, group *core.Group, crea func (_m *Manager) GetGroupByID(ctx context.Context, id string) (*core.Group, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetGroupByID") + } + var r0 *core.Group var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*core.Group, error)); ok { @@ -74,6 +82,10 @@ func (_m *Manager) GetGroupByID(ctx context.Context, id string) (*core.Group, er func (_m *Manager) GetGroups(ctx context.Context, filter ffapi.AndFilter) ([]*core.Group, *ffapi.FilterResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for GetGroups") + } + var r0 []*core.Group var r1 *ffapi.FilterResult var r2 error @@ -109,6 +121,10 @@ func (_m *Manager) GetGroups(ctx context.Context, filter ffapi.AndFilter) ([]*co func (_m *Manager) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -123,6 +139,10 @@ func (_m *Manager) Name() string { func (_m *Manager) NewMessage(msg *core.MessageInOut) syncasync.Sender { ret := _m.Called(msg) + if len(ret) == 0 { + panic("no return value specified for NewMessage") + } + var r0 syncasync.Sender if rf, ok := ret.Get(0).(func(*core.MessageInOut) syncasync.Sender); ok { r0 = rf(msg) @@ -139,6 +159,10 @@ func (_m *Manager) NewMessage(msg *core.MessageInOut) syncasync.Sender { func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*core.PreparedOperation, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for PrepareOperation") + } + var r0 *core.PreparedOperation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Operation) (*core.PreparedOperation, error)); ok { @@ -165,6 +189,10 @@ func (_m *Manager) PrepareOperation(ctx context.Context, op *core.Operation) (*c func (_m *Manager) RequestReply(ctx context.Context, request *core.MessageInOut) (*core.MessageInOut, error) { ret := _m.Called(ctx, request) + if len(ret) == 0 { + panic("no return value specified for RequestReply") + } + var r0 *core.MessageInOut var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.MessageInOut) (*core.MessageInOut, error)); ok { @@ -191,6 +219,10 @@ func (_m *Manager) RequestReply(ctx context.Context, request *core.MessageInOut) func (_m *Manager) ResolveInitGroup(ctx context.Context, msg *core.Message, creator *core.Member) (*core.Group, error) { ret := _m.Called(ctx, msg, creator) + if len(ret) == 
0 { + panic("no return value specified for ResolveInitGroup") + } + var r0 *core.Group var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.Message, *core.Member) (*core.Group, error)); ok { @@ -214,13 +246,17 @@ func (_m *Manager) ResolveInitGroup(ctx context.Context, msg *core.Message, crea } // RunOperation provides a mock function with given fields: ctx, op -func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, bool, error) { +func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error) { ret := _m.Called(ctx, op) + if len(ret) == 0 { + panic("no return value specified for RunOperation") + } + var r0 fftypes.JSONObject - var r1 bool + var r1 core.OpPhase var r2 error - if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) (fftypes.JSONObject, bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) (fftypes.JSONObject, core.OpPhase, error)); ok { return rf(ctx, op) } if rf, ok := ret.Get(0).(func(context.Context, *core.PreparedOperation) fftypes.JSONObject); ok { @@ -231,10 +267,10 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) } } - if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation) bool); ok { + if rf, ok := ret.Get(1).(func(context.Context, *core.PreparedOperation) core.OpPhase); ok { r1 = rf(ctx, op) } else { - r1 = ret.Get(1).(bool) + r1 = ret.Get(1).(core.OpPhase) } if rf, ok := ret.Get(2).(func(context.Context, *core.PreparedOperation) error); ok { @@ -250,6 +286,10 @@ func (_m *Manager) RunOperation(ctx context.Context, op *core.PreparedOperation) func (_m *Manager) SendMessage(ctx context.Context, in *core.MessageInOut, waitConfirm bool) (*core.Message, error) { ret := _m.Called(ctx, in, waitConfirm) + if len(ret) == 0 { + panic("no return value specified for SendMessage") + } + var r0 *core.Message var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.MessageInOut, bool) (*core.Message, error)); ok { @@ -272,13 +312,12 @@ func (_m *Manager) SendMessage(ctx context.Context, in *core.MessageInOut, waitC return r0, r1 } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/shareddownloadmocks/callbacks.go b/mocks/shareddownloadmocks/callbacks.go index 2e4aa227f0..4348a9b638 100644 --- a/mocks/shareddownloadmocks/callbacks.go +++ b/mocks/shareddownloadmocks/callbacks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
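The private messaging manager mock now returns a core.OpPhase from RunOperation where it previously returned a bool. A hedged sketch of stubbing the new return, using the zero value of core.OpPhase since the concrete phase constants are outside this diff; imports follow the first sketch, plus github.com/hyperledger/firefly/mocks/privatemessagingmocks (path assumed):

func TestPrivateMessagingRunOperationPhase(t *testing.T) {
	ctx := context.Background()
	pm := privatemessagingmocks.NewManager(t)

	var phase core.OpPhase // zero value; a real test would return a concrete phase constant from pkg/core
	pm.On("RunOperation", mock.Anything, mock.Anything).
		Return(fftypes.JSONObject{}, phase, nil)

	_, gotPhase, err := pm.RunOperation(ctx, &core.PreparedOperation{})
	if err != nil || gotPhase != phase {
		t.Fatalf("unexpected phase/err: %v %v", gotPhase, err)
	}
}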
package shareddownloadmocks @@ -16,6 +16,10 @@ type Callbacks struct { func (_m *Callbacks) SharedStorageBatchDownloaded(payloadRef string, data []byte) (*fftypes.UUID, error) { ret := _m.Called(payloadRef, data) + if len(ret) == 0 { + panic("no return value specified for SharedStorageBatchDownloaded") + } + var r0 *fftypes.UUID var r1 error if rf, ok := ret.Get(0).(func(string, []byte) (*fftypes.UUID, error)); ok { @@ -42,6 +46,10 @@ func (_m *Callbacks) SharedStorageBatchDownloaded(payloadRef string, data []byte func (_m *Callbacks) SharedStorageBlobDownloaded(hash fftypes.Bytes32, size int64, payloadRef string, dataID *fftypes.UUID) error { ret := _m.Called(hash, size, payloadRef, dataID) + if len(ret) == 0 { + panic("no return value specified for SharedStorageBlobDownloaded") + } + var r0 error if rf, ok := ret.Get(0).(func(fftypes.Bytes32, int64, string, *fftypes.UUID) error); ok { r0 = rf(hash, size, payloadRef, dataID) @@ -52,13 +60,12 @@ func (_m *Callbacks) SharedStorageBlobDownloaded(hash fftypes.Bytes32, size int6 return r0 } -type mockConstructorTestingTNewCallbacks interface { +// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCallbacks(t interface { mock.TestingT Cleanup(func()) -} - -// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCallbacks(t mockConstructorTestingTNewCallbacks) *Callbacks { +}) *Callbacks { mock := &Callbacks{} mock.Mock.Test(t) diff --git a/mocks/shareddownloadmocks/manager.go b/mocks/shareddownloadmocks/manager.go index 0d80af0699..aad3eedb31 100644 --- a/mocks/shareddownloadmocks/manager.go +++ b/mocks/shareddownloadmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
package shareddownloadmocks @@ -14,13 +14,17 @@ type Manager struct { mock.Mock } -// InitiateDownloadBatch provides a mock function with given fields: ctx, tx, payloadRef -func (_m *Manager) InitiateDownloadBatch(ctx context.Context, tx *fftypes.UUID, payloadRef string) error { - ret := _m.Called(ctx, tx, payloadRef) +// InitiateDownloadBatch provides a mock function with given fields: ctx, tx, payloadRef, idempotentSubmit +func (_m *Manager) InitiateDownloadBatch(ctx context.Context, tx *fftypes.UUID, payloadRef string, idempotentSubmit bool) error { + ret := _m.Called(ctx, tx, payloadRef, idempotentSubmit) + + if len(ret) == 0 { + panic("no return value specified for InitiateDownloadBatch") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, string) error); ok { - r0 = rf(ctx, tx, payloadRef) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, string, bool) error); ok { + r0 = rf(ctx, tx, payloadRef, idempotentSubmit) } else { r0 = ret.Error(0) } @@ -28,13 +32,17 @@ func (_m *Manager) InitiateDownloadBatch(ctx context.Context, tx *fftypes.UUID, return r0 } -// InitiateDownloadBlob provides a mock function with given fields: ctx, tx, dataID, payloadRef -func (_m *Manager) InitiateDownloadBlob(ctx context.Context, tx *fftypes.UUID, dataID *fftypes.UUID, payloadRef string) error { - ret := _m.Called(ctx, tx, dataID, payloadRef) +// InitiateDownloadBlob provides a mock function with given fields: ctx, tx, dataID, payloadRef, idempotentSubmit +func (_m *Manager) InitiateDownloadBlob(ctx context.Context, tx *fftypes.UUID, dataID *fftypes.UUID, payloadRef string, idempotentSubmit bool) error { + ret := _m.Called(ctx, tx, dataID, payloadRef, idempotentSubmit) + + if len(ret) == 0 { + panic("no return value specified for InitiateDownloadBlob") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, *fftypes.UUID, string) error); ok { - r0 = rf(ctx, tx, dataID, payloadRef) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, *fftypes.UUID, string, bool) error); ok { + r0 = rf(ctx, tx, dataID, payloadRef, idempotentSubmit) } else { r0 = ret.Error(0) } @@ -46,6 +54,10 @@ func (_m *Manager) InitiateDownloadBlob(ctx context.Context, tx *fftypes.UUID, d func (_m *Manager) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -61,13 +73,12 @@ func (_m *Manager) WaitStop() { _m.Called() } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/sharedstoragemocks/callbacks.go b/mocks/sharedstoragemocks/callbacks.go index 679982a17d..002173b5f1 100644 --- a/mocks/sharedstoragemocks/callbacks.go +++ b/mocks/sharedstoragemocks/callbacks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
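The shared download manager mock now carries the idempotentSubmit flag on both InitiateDownloadBatch and InitiateDownloadBlob. A minimal sketch (imports as in the first sketch, plus github.com/hyperledger/firefly/mocks/shareddownloadmocks, path assumed):

func TestInitiateDownloadBatchIdempotent(t *testing.T) {
	ctx := context.Background()
	sd := shareddownloadmocks.NewManager(t)

	// The fourth argument is the new idempotentSubmit flag.
	sd.On("InitiateDownloadBatch", mock.Anything, mock.Anything, "payload-ref", true).
		Return(nil)

	if err := sd.InitiateDownloadBatch(ctx, fftypes.NewUUID(), "payload-ref", true); err != nil {
		t.Fatal(err)
	}
}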
package sharedstoragemocks @@ -9,13 +9,12 @@ type Callbacks struct { mock.Mock } -type mockConstructorTestingTNewCallbacks interface { +// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCallbacks(t interface { mock.TestingT Cleanup(func()) -} - -// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCallbacks(t mockConstructorTestingTNewCallbacks) *Callbacks { +}) *Callbacks { mock := &Callbacks{} mock.Mock.Test(t) diff --git a/mocks/sharedstoragemocks/plugin.go b/mocks/sharedstoragemocks/plugin.go index 5c0121adcb..44f92f32de 100644 --- a/mocks/sharedstoragemocks/plugin.go +++ b/mocks/sharedstoragemocks/plugin.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package sharedstoragemocks @@ -23,6 +23,10 @@ type Plugin struct { func (_m *Plugin) Capabilities() *sharedstorage.Capabilities { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Capabilities") + } + var r0 *sharedstorage.Capabilities if rf, ok := ret.Get(0).(func() *sharedstorage.Capabilities); ok { r0 = rf() @@ -39,6 +43,10 @@ func (_m *Plugin) Capabilities() *sharedstorage.Capabilities { func (_m *Plugin) DownloadData(ctx context.Context, payloadRef string) (io.ReadCloser, error) { ret := _m.Called(ctx, payloadRef) + if len(ret) == 0 { + panic("no return value specified for DownloadData") + } + var r0 io.ReadCloser var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (io.ReadCloser, error)); ok { @@ -65,6 +73,10 @@ func (_m *Plugin) DownloadData(ctx context.Context, payloadRef string) (io.ReadC func (_m *Plugin) Init(ctx context.Context, _a1 config.Section) error { ret := _m.Called(ctx, _a1) + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, config.Section) error); ok { r0 = rf(ctx, _a1) @@ -84,6 +96,10 @@ func (_m *Plugin) InitConfig(_a0 config.Section) { func (_m *Plugin) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -103,6 +119,10 @@ func (_m *Plugin) SetHandler(namespace string, handler sharedstorage.Callbacks) func (_m *Plugin) UploadData(ctx context.Context, data io.Reader) (string, error) { ret := _m.Called(ctx, data) + if len(ret) == 0 { + panic("no return value specified for UploadData") + } + var r0 string var r1 error if rf, ok := ret.Get(0).(func(context.Context, io.Reader) (string, error)); ok { @@ -123,13 +143,12 @@ func (_m *Plugin) UploadData(ctx context.Context, data io.Reader) (string, error return r0, r1 } -type mockConstructorTestingTNewPlugin interface { +// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPlugin(t interface { mock.TestingT Cleanup(func()) -} - -// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewPlugin(t mockConstructorTestingTNewPlugin) *Plugin { +}) *Plugin { mock := &Plugin{} mock.Mock.Test(t) diff --git a/mocks/spieventsmocks/manager.go b/mocks/spieventsmocks/manager.go index 2113ae9a6d..7286e9b58b 100644 --- a/mocks/spieventsmocks/manager.go +++ b/mocks/spieventsmocks/manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package spieventsmocks @@ -30,13 +30,12 @@ func (_m *Manager) WaitStop() { _m.Called() } -type mockConstructorTestingTNewManager interface { +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewManager(t mockConstructorTestingTNewManager) *Manager { +}) *Manager { mock := &Manager{} mock.Mock.Test(t) diff --git a/mocks/syncasyncmocks/bridge.go b/mocks/syncasyncmocks/bridge.go index 954a67ba86..57b9fcded8 100644 --- a/mocks/syncasyncmocks/bridge.go +++ b/mocks/syncasyncmocks/bridge.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package syncasyncmocks @@ -29,6 +29,10 @@ func (_m *Bridge) Init(sysevents system.EventInterface) { func (_m *Bridge) WaitForDeployOperation(ctx context.Context, id *fftypes.UUID, send syncasync.SendFunction) (*core.Operation, error) { ret := _m.Called(ctx, id, send) + if len(ret) == 0 { + panic("no return value specified for WaitForDeployOperation") + } + var r0 *core.Operation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, syncasync.SendFunction) (*core.Operation, error)); ok { @@ -55,6 +59,10 @@ func (_m *Bridge) WaitForDeployOperation(ctx context.Context, id *fftypes.UUID, func (_m *Bridge) WaitForIdentity(ctx context.Context, id *fftypes.UUID, send syncasync.SendFunction) (*core.Identity, error) { ret := _m.Called(ctx, id, send) + if len(ret) == 0 { + panic("no return value specified for WaitForIdentity") + } + var r0 *core.Identity var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, syncasync.SendFunction) (*core.Identity, error)); ok { @@ -81,6 +89,10 @@ func (_m *Bridge) WaitForIdentity(ctx context.Context, id *fftypes.UUID, send sy func (_m *Bridge) WaitForInvokeOperation(ctx context.Context, id *fftypes.UUID, send syncasync.SendFunction) (*core.Operation, error) { ret := _m.Called(ctx, id, send) + if len(ret) == 0 { + panic("no return value specified for WaitForInvokeOperation") + } + var r0 *core.Operation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, syncasync.SendFunction) (*core.Operation, error)); ok { @@ -107,6 +119,10 @@ func (_m *Bridge) WaitForInvokeOperation(ctx context.Context, id *fftypes.UUID, func (_m *Bridge) WaitForMessage(ctx context.Context, id *fftypes.UUID, send syncasync.SendFunction) (*core.Message, error) { ret := _m.Called(ctx, id, send) + if len(ret) == 0 { + panic("no return value specified for WaitForMessage") + } + var r0 *core.Message var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, syncasync.SendFunction) (*core.Message, error)); ok { @@ -133,6 +149,10 @@ func (_m *Bridge) WaitForMessage(ctx context.Context, id *fftypes.UUID, send syn func (_m 
*Bridge) WaitForReply(ctx context.Context, id *fftypes.UUID, send syncasync.SendFunction) (*core.MessageInOut, error) { ret := _m.Called(ctx, id, send) + if len(ret) == 0 { + panic("no return value specified for WaitForReply") + } + var r0 *core.MessageInOut var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, syncasync.SendFunction) (*core.MessageInOut, error)); ok { @@ -159,6 +179,10 @@ func (_m *Bridge) WaitForReply(ctx context.Context, id *fftypes.UUID, send synca func (_m *Bridge) WaitForTokenApproval(ctx context.Context, id *fftypes.UUID, send syncasync.SendFunction) (*core.TokenApproval, error) { ret := _m.Called(ctx, id, send) + if len(ret) == 0 { + panic("no return value specified for WaitForTokenApproval") + } + var r0 *core.TokenApproval var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, syncasync.SendFunction) (*core.TokenApproval, error)); ok { @@ -185,6 +209,10 @@ func (_m *Bridge) WaitForTokenApproval(ctx context.Context, id *fftypes.UUID, se func (_m *Bridge) WaitForTokenPool(ctx context.Context, id *fftypes.UUID, send syncasync.SendFunction) (*core.TokenPool, error) { ret := _m.Called(ctx, id, send) + if len(ret) == 0 { + panic("no return value specified for WaitForTokenPool") + } + var r0 *core.TokenPool var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, syncasync.SendFunction) (*core.TokenPool, error)); ok { @@ -211,6 +239,10 @@ func (_m *Bridge) WaitForTokenPool(ctx context.Context, id *fftypes.UUID, send s func (_m *Bridge) WaitForTokenTransfer(ctx context.Context, id *fftypes.UUID, send syncasync.SendFunction) (*core.TokenTransfer, error) { ret := _m.Called(ctx, id, send) + if len(ret) == 0 { + panic("no return value specified for WaitForTokenTransfer") + } + var r0 *core.TokenTransfer var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, syncasync.SendFunction) (*core.TokenTransfer, error)); ok { @@ -233,13 +265,12 @@ func (_m *Bridge) WaitForTokenTransfer(ctx context.Context, id *fftypes.UUID, se return r0, r1 } -type mockConstructorTestingTNewBridge interface { +// NewBridge creates a new instance of Bridge. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBridge(t interface { mock.TestingT Cleanup(func()) -} - -// NewBridge creates a new instance of Bridge. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBridge(t mockConstructorTestingTNewBridge) *Bridge { +}) *Bridge { mock := &Bridge{} mock.Mock.Test(t) diff --git a/mocks/syncasyncmocks/sender.go b/mocks/syncasyncmocks/sender.go index cb51639446..8f68f26c96 100644 --- a/mocks/syncasyncmocks/sender.go +++ b/mocks/syncasyncmocks/sender.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
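All of the regenerated mocks above share one behavioral change from mockery v2.38.x: a call whose return values were never configured now panics with "no return value specified for …" rather than silently returning zero values, so tests must stub every method the code under test actually reaches. A sketch against the syncasync Bridge mock (imports as in the first sketch, plus github.com/hyperledger/firefly/mocks/syncasyncmocks, path assumed):

func TestBridgeWaitForMessageStubbed(t *testing.T) {
	ctx := context.Background()
	sa := syncasyncmocks.NewBridge(t)

	// Without this On(...) the regenerated mock would panic:
	// "no return value specified for WaitForMessage".
	sa.On("WaitForMessage", mock.Anything, mock.Anything, mock.Anything).
		Return(&core.Message{}, nil)

	if _, err := sa.WaitForMessage(ctx, fftypes.NewUUID(), nil); err != nil {
		t.Fatal(err)
	}
}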
package syncasyncmocks @@ -17,6 +17,10 @@ type Sender struct { func (_m *Sender) Prepare(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Prepare") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -31,6 +35,10 @@ func (_m *Sender) Prepare(ctx context.Context) error { func (_m *Sender) Send(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Send") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -45,6 +53,10 @@ func (_m *Sender) Send(ctx context.Context) error { func (_m *Sender) SendAndWait(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for SendAndWait") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -55,13 +67,12 @@ func (_m *Sender) SendAndWait(ctx context.Context) error { return r0 } -type mockConstructorTestingTNewSender interface { +// NewSender creates a new instance of Sender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSender(t interface { mock.TestingT Cleanup(func()) -} - -// NewSender creates a new instance of Sender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSender(t mockConstructorTestingTNewSender) *Sender { +}) *Sender { mock := &Sender{} mock.Mock.Test(t) diff --git a/mocks/systemeventmocks/event_interface.go b/mocks/systemeventmocks/event_interface.go index 6089636b84..23db5f2059 100644 --- a/mocks/systemeventmocks/event_interface.go +++ b/mocks/systemeventmocks/event_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package systemeventmocks @@ -16,6 +16,10 @@ type EventInterface struct { func (_m *EventInterface) AddSystemEventListener(ns string, el system.EventListener) error { ret := _m.Called(ns, el) + if len(ret) == 0 { + panic("no return value specified for AddSystemEventListener") + } + var r0 error if rf, ok := ret.Get(0).(func(string, system.EventListener) error); ok { r0 = rf(ns, el) @@ -26,13 +30,12 @@ func (_m *EventInterface) AddSystemEventListener(ns string, el system.EventListe return r0 } -type mockConstructorTestingTNewEventInterface interface { +// NewEventInterface creates a new instance of EventInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventInterface(t interface { mock.TestingT Cleanup(func()) -} - -// NewEventInterface creates a new instance of EventInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEventInterface(t mockConstructorTestingTNewEventInterface) *EventInterface { +}) *EventInterface { mock := &EventInterface{} mock.Mock.Test(t) diff --git a/mocks/tokenmocks/callbacks.go b/mocks/tokenmocks/callbacks.go index 3c19a8ecdf..17104d6520 100644 --- a/mocks/tokenmocks/callbacks.go +++ b/mocks/tokenmocks/callbacks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
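Replacing the named mockConstructorTestingTNewX helper interfaces with inline anonymous interface parameters does not change call sites: the constructors still accept anything that satisfies mock.TestingT plus Cleanup(func()), which *testing.T does. A minimal sketch with the syncasync Sender mock (imports as in the first sketch; mock package path assumed):

func TestSenderConstructorUnchanged(t *testing.T) {
	ctx := context.Background()
	s := syncasyncmocks.NewSender(t) // same call shape as before the refactor

	s.On("Send", mock.Anything).Return(nil)
	if err := s.Send(ctx); err != nil {
		t.Fatal(err)
	}
}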
package tokenmocks @@ -18,6 +18,10 @@ type Callbacks struct { func (_m *Callbacks) TokenPoolCreated(ctx context.Context, plugin tokens.Plugin, pool *tokens.TokenPool) error { ret := _m.Called(ctx, plugin, pool) + if len(ret) == 0 { + panic("no return value specified for TokenPoolCreated") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, tokens.Plugin, *tokens.TokenPool) error); ok { r0 = rf(ctx, plugin, pool) @@ -32,6 +36,10 @@ func (_m *Callbacks) TokenPoolCreated(ctx context.Context, plugin tokens.Plugin, func (_m *Callbacks) TokensApproved(plugin tokens.Plugin, approval *tokens.TokenApproval) error { ret := _m.Called(plugin, approval) + if len(ret) == 0 { + panic("no return value specified for TokensApproved") + } + var r0 error if rf, ok := ret.Get(0).(func(tokens.Plugin, *tokens.TokenApproval) error); ok { r0 = rf(plugin, approval) @@ -46,6 +54,10 @@ func (_m *Callbacks) TokensApproved(plugin tokens.Plugin, approval *tokens.Token func (_m *Callbacks) TokensTransferred(plugin tokens.Plugin, transfer *tokens.TokenTransfer) error { ret := _m.Called(plugin, transfer) + if len(ret) == 0 { + panic("no return value specified for TokensTransferred") + } + var r0 error if rf, ok := ret.Get(0).(func(tokens.Plugin, *tokens.TokenTransfer) error); ok { r0 = rf(plugin, transfer) @@ -56,13 +68,12 @@ func (_m *Callbacks) TokensTransferred(plugin tokens.Plugin, transfer *tokens.To return r0 } -type mockConstructorTestingTNewCallbacks interface { +// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCallbacks(t interface { mock.TestingT Cleanup(func()) -} - -// NewCallbacks creates a new instance of Callbacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCallbacks(t mockConstructorTestingTNewCallbacks) *Callbacks { +}) *Callbacks { mock := &Callbacks{} mock.Mock.Test(t) diff --git a/mocks/tokenmocks/plugin.go b/mocks/tokenmocks/plugin.go index 9abe346973..03a5e09cfa 100644 --- a/mocks/tokenmocks/plugin.go +++ b/mocks/tokenmocks/plugin.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. 
package tokenmocks @@ -21,23 +21,27 @@ type Plugin struct { mock.Mock } -// ActivateTokenPool provides a mock function with given fields: ctx, nsOpID, pool -func (_m *Plugin) ActivateTokenPool(ctx context.Context, nsOpID string, pool *core.TokenPool) (bool, error) { - ret := _m.Called(ctx, nsOpID, pool) +// ActivateTokenPool provides a mock function with given fields: ctx, pool +func (_m *Plugin) ActivateTokenPool(ctx context.Context, pool *core.TokenPool) (core.OpPhase, error) { + ret := _m.Called(ctx, pool) + + if len(ret) == 0 { + panic("no return value specified for ActivateTokenPool") + } - var r0 bool + var r0 core.OpPhase var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, *core.TokenPool) (bool, error)); ok { - return rf(ctx, nsOpID, pool) + if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool) (core.OpPhase, error)); ok { + return rf(ctx, pool) } - if rf, ok := ret.Get(0).(func(context.Context, string, *core.TokenPool) bool); ok { - r0 = rf(ctx, nsOpID, pool) + if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool) core.OpPhase); ok { + r0 = rf(ctx, pool) } else { - r0 = ret.Get(0).(bool) + r0 = ret.Get(0).(core.OpPhase) } - if rf, ok := ret.Get(1).(func(context.Context, string, *core.TokenPool) error); ok { - r1 = rf(ctx, nsOpID, pool) + if rf, ok := ret.Get(1).(func(context.Context, *core.TokenPool) error); ok { + r1 = rf(ctx, pool) } else { r1 = ret.Error(1) } @@ -49,6 +53,10 @@ func (_m *Plugin) ActivateTokenPool(ctx context.Context, nsOpID string, pool *co func (_m *Plugin) BurnTokens(ctx context.Context, nsOpID string, poolLocator string, burn *core.TokenTransfer, methods *fftypes.JSONAny) error { ret := _m.Called(ctx, nsOpID, poolLocator, burn, methods) + if len(ret) == 0 { + panic("no return value specified for BurnTokens") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, *core.TokenTransfer, *fftypes.JSONAny) error); ok { r0 = rf(ctx, nsOpID, poolLocator, burn, methods) @@ -63,6 +71,10 @@ func (_m *Plugin) BurnTokens(ctx context.Context, nsOpID string, poolLocator str func (_m *Plugin) Capabilities() *tokens.Capabilities { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Capabilities") + } + var r0 *tokens.Capabilities if rf, ok := ret.Get(0).(func() *tokens.Capabilities); ok { r0 = rf() @@ -79,6 +91,10 @@ func (_m *Plugin) Capabilities() *tokens.Capabilities { func (_m *Plugin) CheckInterface(ctx context.Context, pool *core.TokenPool, methods []*fftypes.FFIMethod) (*fftypes.JSONAny, error) { ret := _m.Called(ctx, pool, methods) + if len(ret) == 0 { + panic("no return value specified for CheckInterface") + } + var r0 *fftypes.JSONAny var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool, []*fftypes.FFIMethod) (*fftypes.JSONAny, error)); ok { @@ -102,18 +118,22 @@ func (_m *Plugin) CheckInterface(ctx context.Context, pool *core.TokenPool, meth } // CreateTokenPool provides a mock function with given fields: ctx, nsOpID, pool -func (_m *Plugin) CreateTokenPool(ctx context.Context, nsOpID string, pool *core.TokenPool) (bool, error) { +func (_m *Plugin) CreateTokenPool(ctx context.Context, nsOpID string, pool *core.TokenPool) (core.OpPhase, error) { ret := _m.Called(ctx, nsOpID, pool) - var r0 bool + if len(ret) == 0 { + panic("no return value specified for CreateTokenPool") + } + + var r0 core.OpPhase var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, *core.TokenPool) (bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, 
string, *core.TokenPool) (core.OpPhase, error)); ok { return rf(ctx, nsOpID, pool) } - if rf, ok := ret.Get(0).(func(context.Context, string, *core.TokenPool) bool); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, *core.TokenPool) core.OpPhase); ok { r0 = rf(ctx, nsOpID, pool) } else { - r0 = ret.Get(0).(bool) + r0 = ret.Get(0).(core.OpPhase) } if rf, ok := ret.Get(1).(func(context.Context, string, *core.TokenPool) error); ok { @@ -125,10 +145,32 @@ func (_m *Plugin) CreateTokenPool(ctx context.Context, nsOpID string, pool *core return r0, r1 } +// DeactivateTokenPool provides a mock function with given fields: ctx, pool +func (_m *Plugin) DeactivateTokenPool(ctx context.Context, pool *core.TokenPool) error { + ret := _m.Called(ctx, pool) + + if len(ret) == 0 { + panic("no return value specified for DeactivateTokenPool") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *core.TokenPool) error); ok { + r0 = rf(ctx, pool) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // Init provides a mock function with given fields: ctx, cancelCtx, name, _a3 func (_m *Plugin) Init(ctx context.Context, cancelCtx context.CancelFunc, name string, _a3 config.Section) error { ret := _m.Called(ctx, cancelCtx, name, _a3) + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, context.CancelFunc, string, config.Section) error); ok { r0 = rf(ctx, cancelCtx, name, _a3) @@ -148,6 +190,10 @@ func (_m *Plugin) InitConfig(_a0 config.Section) { func (_m *Plugin) MintTokens(ctx context.Context, nsOpID string, poolLocator string, mint *core.TokenTransfer, methods *fftypes.JSONAny) error { ret := _m.Called(ctx, nsOpID, poolLocator, mint, methods) + if len(ret) == 0 { + panic("no return value specified for MintTokens") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, *core.TokenTransfer, *fftypes.JSONAny) error); ok { r0 = rf(ctx, nsOpID, poolLocator, mint, methods) @@ -162,6 +208,10 @@ func (_m *Plugin) MintTokens(ctx context.Context, nsOpID string, poolLocator str func (_m *Plugin) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -186,6 +236,10 @@ func (_m *Plugin) SetOperationHandler(namespace string, handler core.OperationCa func (_m *Plugin) Start() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -200,6 +254,10 @@ func (_m *Plugin) Start() error { func (_m *Plugin) TokensApproval(ctx context.Context, nsOpID string, poolLocator string, approval *core.TokenApproval, methods *fftypes.JSONAny) error { ret := _m.Called(ctx, nsOpID, poolLocator, approval, methods) + if len(ret) == 0 { + panic("no return value specified for TokensApproval") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, *core.TokenApproval, *fftypes.JSONAny) error); ok { r0 = rf(ctx, nsOpID, poolLocator, approval, methods) @@ -214,6 +272,10 @@ func (_m *Plugin) TokensApproval(ctx context.Context, nsOpID string, poolLocator func (_m *Plugin) TransferTokens(ctx context.Context, nsOpID string, poolLocator string, transfer *core.TokenTransfer, methods *fftypes.JSONAny) error { ret := _m.Called(ctx, nsOpID, poolLocator, transfer, methods) + if len(ret) == 0 { + panic("no return value specified for TransferTokens") + } + var r0 
error if rf, ok := ret.Get(0).(func(context.Context, string, string, *core.TokenTransfer, *fftypes.JSONAny) error); ok { r0 = rf(ctx, nsOpID, poolLocator, transfer, methods) @@ -224,13 +286,12 @@ func (_m *Plugin) TransferTokens(ctx context.Context, nsOpID string, poolLocator return r0 } -type mockConstructorTestingTNewPlugin interface { +// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPlugin(t interface { mock.TestingT Cleanup(func()) -} - -// NewPlugin creates a new instance of Plugin. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPlugin(t mockConstructorTestingTNewPlugin) *Plugin { +}) *Plugin { mock := &Plugin{} mock.Mock.Test(t) diff --git a/mocks/txcommonmocks/helper.go b/mocks/txcommonmocks/helper.go index 55a2e8f98b..083bf6c9b3 100644 --- a/mocks/txcommonmocks/helper.go +++ b/mocks/txcommonmocks/helper.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package txcommonmocks @@ -9,6 +9,8 @@ import ( core "github.com/hyperledger/firefly/pkg/core" mock "github.com/stretchr/testify/mock" + + txcommon "github.com/hyperledger/firefly/internal/txcommon" ) // Helper is an autogenerated mock type for the Helper type @@ -20,6 +22,10 @@ type Helper struct { func (_m *Helper) AddBlockchainTX(ctx context.Context, tx *core.Transaction, blockchainTXID string) error { ret := _m.Called(ctx, tx, blockchainTXID) + if len(ret) == 0 { + panic("no return value specified for AddBlockchainTX") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *core.Transaction, string) error); ok { r0 = rf(ctx, tx, blockchainTXID) @@ -34,6 +40,10 @@ func (_m *Helper) AddBlockchainTX(ctx context.Context, tx *core.Transaction, blo func (_m *Helper) FindOperationInTransaction(ctx context.Context, tx *fftypes.UUID, opType fftypes.FFEnum) (*core.Operation, error) { ret := _m.Called(ctx, tx, opType) + if len(ret) == 0 { + panic("no return value specified for FindOperationInTransaction") + } + var r0 *core.Operation var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, fftypes.FFEnum) (*core.Operation, error)); ok { @@ -60,6 +70,10 @@ func (_m *Helper) FindOperationInTransaction(ctx context.Context, tx *fftypes.UU func (_m *Helper) GetBlockchainEventByIDCached(ctx context.Context, id *fftypes.UUID) (*core.BlockchainEvent, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetBlockchainEventByIDCached") + } + var r0 *core.BlockchainEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (*core.BlockchainEvent, error)); ok { @@ -86,6 +100,10 @@ func (_m *Helper) GetBlockchainEventByIDCached(ctx context.Context, id *fftypes. 
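For the regenerated tokens Plugin mock above: ActivateTokenPool drops the nsOpID argument and, like CreateTokenPool, now reports a core.OpPhase, while DeactivateTokenPool is new. A hedged sketch (zero-value OpPhase again; imports as in the first sketch, plus github.com/hyperledger/firefly/mocks/tokenmocks, path assumed):

func TestTokenPoolLifecycleMocks(t *testing.T) {
	ctx := context.Background()
	tp := tokenmocks.NewPlugin(t)

	var phase core.OpPhase // zero value; real tests would return a concrete phase constant
	tp.On("ActivateTokenPool", mock.Anything, mock.Anything).Return(phase, nil) // no nsOpID any more
	tp.On("DeactivateTokenPool", mock.Anything, mock.Anything).Return(nil)

	if _, err := tp.ActivateTokenPool(ctx, &core.TokenPool{}); err != nil {
		t.Fatal(err)
	}
	if err := tp.DeactivateTokenPool(ctx, &core.TokenPool{}); err != nil {
		t.Fatal(err)
	}
}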
func (_m *Helper) GetTransactionByIDCached(ctx context.Context, id *fftypes.UUID) (*core.Transaction, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetTransactionByIDCached") + } + var r0 *core.Transaction var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) (*core.Transaction, error)); ok { @@ -108,10 +126,44 @@ func (_m *Helper) GetTransactionByIDCached(ctx context.Context, id *fftypes.UUID return r0, r1 } +// InsertNewBlockchainEvents provides a mock function with given fields: ctx, events +func (_m *Helper) InsertNewBlockchainEvents(ctx context.Context, events []*core.BlockchainEvent) ([]*core.BlockchainEvent, error) { + ret := _m.Called(ctx, events) + + if len(ret) == 0 { + panic("no return value specified for InsertNewBlockchainEvents") + } + + var r0 []*core.BlockchainEvent + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []*core.BlockchainEvent) ([]*core.BlockchainEvent, error)); ok { + return rf(ctx, events) + } + if rf, ok := ret.Get(0).(func(context.Context, []*core.BlockchainEvent) []*core.BlockchainEvent); ok { + r0 = rf(ctx, events) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*core.BlockchainEvent) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []*core.BlockchainEvent) error); ok { + r1 = rf(ctx, events) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // InsertOrGetBlockchainEvent provides a mock function with given fields: ctx, event func (_m *Helper) InsertOrGetBlockchainEvent(ctx context.Context, event *core.BlockchainEvent) (*core.BlockchainEvent, error) { ret := _m.Called(ctx, event) + if len(ret) == 0 { + panic("no return value specified for InsertOrGetBlockchainEvent") + } + var r0 *core.BlockchainEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, *core.BlockchainEvent) (*core.BlockchainEvent, error)); ok { @@ -138,6 +190,10 @@ func (_m *Helper) InsertOrGetBlockchainEvent(ctx context.Context, event *core.Bl func (_m *Helper) PersistTransaction(ctx context.Context, id *fftypes.UUID, txType fftypes.FFEnum, blockchainTXID string) (bool, error) { ret := _m.Called(ctx, id, txType, blockchainTXID) + if len(ret) == 0 { + panic("no return value specified for PersistTransaction") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, fftypes.FFEnum, string) (bool, error)); ok { @@ -162,6 +218,10 @@ func (_m *Helper) PersistTransaction(ctx context.Context, id *fftypes.UUID, txTy func (_m *Helper) SubmitNewTransaction(ctx context.Context, txType fftypes.FFEnum, idempotencyKey core.IdempotencyKey) (*fftypes.UUID, error) { ret := _m.Called(ctx, txType, idempotencyKey) + if len(ret) == 0 { + panic("no return value specified for SubmitNewTransaction") + } + var r0 *fftypes.UUID var r1 error if rf, ok := ret.Get(0).(func(context.Context, fftypes.FFEnum, core.IdempotencyKey) (*fftypes.UUID, error)); ok { @@ -184,13 +244,30 @@ func (_m *Helper) SubmitNewTransaction(ctx context.Context, txType fftypes.FFEnu return r0, r1 } -type mockConstructorTestingTNewHelper interface { - mock.TestingT - Cleanup(func()) +// SubmitNewTransactionBatch provides a mock function with given fields: ctx, namespace, batch +func (_m *Helper) SubmitNewTransactionBatch(ctx context.Context, namespace string, batch []*txcommon.BatchedTransactionInsert) error { + ret := _m.Called(ctx, namespace, batch) + + if len(ret) == 0 { + panic("no return value specified for SubmitNewTransactionBatch") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context, string, []*txcommon.BatchedTransactionInsert) error); ok { + r0 = rf(ctx, namespace, batch) + } else { + r0 = ret.Error(0) + } + + return r0 } // NewHelper creates a new instance of Helper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHelper(t mockConstructorTestingTNewHelper) *Helper { +// The first argument is typically a *testing.T value. +func NewHelper(t interface { + mock.TestingT + Cleanup(func()) +}) *Helper { mock := &Helper{} mock.Mock.Test(t) diff --git a/mocks/txwritermocks/writer.go b/mocks/txwritermocks/writer.go new file mode 100644 index 0000000000..7facf76e67 --- /dev/null +++ b/mocks/txwritermocks/writer.go @@ -0,0 +1,78 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package txwritermocks + +import ( + context "context" + + fftypes "github.com/hyperledger/firefly-common/pkg/fftypes" + core "github.com/hyperledger/firefly/pkg/core" + + mock "github.com/stretchr/testify/mock" +) + +// Writer is an autogenerated mock type for the Writer type +type Writer struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Writer) Close() { + _m.Called() +} + +// Start provides a mock function with given fields: +func (_m *Writer) Start() { + _m.Called() +} + +// WriteTransactionAndOps provides a mock function with given fields: ctx, txType, idempotencyKey, operations +func (_m *Writer) WriteTransactionAndOps(ctx context.Context, txType fftypes.FFEnum, idempotencyKey core.IdempotencyKey, operations ...*core.Operation) (*core.Transaction, error) { + _va := make([]interface{}, len(operations)) + for _i := range operations { + _va[_i] = operations[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, txType, idempotencyKey) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for WriteTransactionAndOps") + } + + var r0 *core.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, fftypes.FFEnum, core.IdempotencyKey, ...*core.Operation) (*core.Transaction, error)); ok { + return rf(ctx, txType, idempotencyKey, operations...) + } + if rf, ok := ret.Get(0).(func(context.Context, fftypes.FFEnum, core.IdempotencyKey, ...*core.Operation) *core.Transaction); ok { + r0 = rf(ctx, txType, idempotencyKey, operations...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, fftypes.FFEnum, core.IdempotencyKey, ...*core.Operation) error); ok { + r1 = rf(ctx, txType, idempotencyKey, operations...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewWriter creates a new instance of Writer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *Writer { + mock := &Writer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/mocks/websocketsmocks/web_sockets_namespaced.go b/mocks/websocketsmocks/web_sockets_namespaced.go new file mode 100644 index 0000000000..9c1e94c048 --- /dev/null +++ b/mocks/websocketsmocks/web_sockets_namespaced.go @@ -0,0 +1,33 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
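For orientation, a minimal sketch (not part of this change set) of how the regenerated mockery v2.38.0 mocks above are typically consumed in a unit test. The test name, transaction type and literal values are illustrative assumptions; the behavioural point is that calling a mocked method with no .Return(...) configured now panics with "no return value specified" rather than returning zero values.

package txwriter_example_test

import (
	"context"
	"testing"

	"github.com/hyperledger/firefly-common/pkg/fftypes"
	"github.com/hyperledger/firefly/mocks/txwritermocks"
	"github.com/hyperledger/firefly/pkg/core"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

func TestWriteTransactionAndOpsMock(t *testing.T) {
	// NewWriter now registers cleanup and AssertExpectations automatically.
	mtw := txwritermocks.NewWriter(t)

	// Stub the variadic call - without this .Return the call below would panic.
	mtw.On("WriteTransactionAndOps",
		mock.Anything, // ctx
		core.TransactionTypeContractInvoke,
		core.IdempotencyKey("idem-1"),
		mock.Anything, // the single operation passed below
	).Return(&core.Transaction{ID: fftypes.NewUUID()}, nil)

	tx, err := mtw.WriteTransactionAndOps(context.Background(),
		core.TransactionTypeContractInvoke, core.IdempotencyKey("idem-1"), &core.Operation{})
	assert.NoError(t, err)
	assert.NotNil(t, tx)
}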
+ +package websocketsmocks + +import ( + http "net/http" + + mock "github.com/stretchr/testify/mock" +) + +// WebSocketsNamespaced is an autogenerated mock type for the WebSocketsNamespaced type +type WebSocketsNamespaced struct { + mock.Mock +} + +// ServeHTTPNamespaced provides a mock function with given fields: namespace, res, req +func (_m *WebSocketsNamespaced) ServeHTTPNamespaced(namespace string, res http.ResponseWriter, req *http.Request) { + _m.Called(namespace, res, req) +} + +// NewWebSocketsNamespaced creates a new instance of WebSocketsNamespaced. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWebSocketsNamespaced(t interface { + mock.TestingT + Cleanup(func()) +}) *WebSocketsNamespaced { + mock := &WebSocketsNamespaced{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/mocks/wsmocks/ws_client.go b/mocks/wsmocks/ws_client.go index e647f9cea3..00bbbcfaaa 100644 --- a/mocks/wsmocks/ws_client.go +++ b/mocks/wsmocks/ws_client.go @@ -1,10 +1,11 @@ -// Code generated by mockery v2.20.2. DO NOT EDIT. +// Code generated by mockery v2.38.0. DO NOT EDIT. package wsmocks import ( context "context" + wsclient "github.com/hyperledger/firefly-common/pkg/wsclient" mock "github.com/stretchr/testify/mock" ) @@ -22,6 +23,10 @@ func (_m *WSClient) Close() { func (_m *WSClient) Connect() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Connect") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -36,6 +41,10 @@ func (_m *WSClient) Connect() error { func (_m *WSClient) Receive() <-chan []byte { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Receive") + } + var r0 <-chan []byte if rf, ok := ret.Get(0).(func() <-chan []byte); ok { r0 = rf() @@ -48,10 +57,34 @@ func (_m *WSClient) Receive() <-chan []byte { return r0 } +// ReceiveExt provides a mock function with given fields: +func (_m *WSClient) ReceiveExt() <-chan *wsclient.WSPayload { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ReceiveExt") + } + + var r0 <-chan *wsclient.WSPayload + if rf, ok := ret.Get(0).(func() <-chan *wsclient.WSPayload); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *wsclient.WSPayload) + } + } + + return r0 +} + // Send provides a mock function with given fields: ctx, message func (_m *WSClient) Send(ctx context.Context, message []byte) error { ret := _m.Called(ctx, message) + if len(ret) == 0 { + panic("no return value specified for Send") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, []byte) error); ok { r0 = rf(ctx, message) @@ -62,6 +95,11 @@ func (_m *WSClient) Send(ctx context.Context, message []byte) error { return r0 } +// SetHeader provides a mock function with given fields: header, value +func (_m *WSClient) SetHeader(header string, value string) { + _m.Called(header, value) +} + // SetURL provides a mock function with given fields: url func (_m *WSClient) SetURL(url string) { _m.Called(url) @@ -71,6 +109,10 @@ func (_m *WSClient) SetURL(url string) { func (_m *WSClient) URL() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for URL") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -81,13 +123,12 @@ func (_m *WSClient) URL() string { return r0 } -type mockConstructorTestingTNewWSClient 
interface { +// NewWSClient creates a new instance of WSClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWSClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewWSClient creates a new instance of WSClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewWSClient(t mockConstructorTestingTNewWSClient) *WSClient { +}) *WSClient { mock := &WSClient{} mock.Mock.Test(t) diff --git a/pkg/blockchain/plugin.go b/pkg/blockchain/plugin.go index c206860c90..cd576772ab 100644 --- a/pkg/blockchain/plugin.go +++ b/pkg/blockchain/plugin.go @@ -78,16 +78,20 @@ type Plugin interface { SubmitNetworkAction(ctx context.Context, nsOpID, signingKey string, action core.NetworkActionType, location *fftypes.JSONAny) error // DeployContract submits a new transaction to deploy a new instance of a smart contract - DeployContract(ctx context.Context, nsOpID, signingKey string, definition, contract *fftypes.JSONAny, input []interface{}, options map[string]interface{}) error + DeployContract(ctx context.Context, nsOpID, signingKey string, definition, contract *fftypes.JSONAny, input []interface{}, options map[string]interface{}) (submissionRejected bool, err error) - // ValidateInvokeRequest performs pre-flight validation of a method call, e.g. to check that parameter formats are correct - ValidateInvokeRequest(ctx context.Context, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, hasMessage bool) error + // ParseInterface processes an FFIMethod and FFIError array into a blockchain specific object, that will be + // cached for this given interface, and passed back on all future invocations. 
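A hedged sketch of how calling code might drive the reshaped invoke path declared below: ParseInterface is called once (and cached) per FFI interface, and its opaque result is then passed to ValidateInvokeRequest and InvokeContract, which now reports a definitive connector rejection separately from a transient error. The helper name, caching strategy and error wrapping are illustrative assumptions, not FireFly core code.

package example

import (
	"context"
	"fmt"

	"github.com/hyperledger/firefly-common/pkg/fftypes"
	"github.com/hyperledger/firefly/pkg/blockchain"
)

func invokeOnce(ctx context.Context, plugin blockchain.Plugin, nsOpID, signingKey string,
	location *fftypes.JSONAny, method *fftypes.FFIMethod, ffiErrors []*fftypes.FFIError,
	input map[string]interface{}, options map[string]interface{}) error {

	// Parse the FFI method once; real callers cache this per interface.
	parsedMethod, err := plugin.ParseInterface(ctx, method, ffiErrors)
	if err != nil {
		return err
	}

	// Pre-flight validation against the connector-specific parsed form.
	if err := plugin.ValidateInvokeRequest(ctx, parsedMethod, input, false /* hasMessage */); err != nil {
		return err
	}

	// submissionRejected=true means the connector reported a definitive
	// rejection of the request, as opposed to a transient submission failure.
	submissionRejected, err := plugin.InvokeContract(ctx, nsOpID, signingKey, location,
		parsedMethod, input, options, nil /* no batch pin */)
	if err != nil && submissionRejected {
		return fmt.Errorf("invoke rejected by connector: %w", err)
	}
	return err
}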
+ ParseInterface(ctx context.Context, method *fftypes.FFIMethod, errors []*fftypes.FFIError) (interface{}, error) + + // ValidateInvokeRequest performs pre-flight validation of a method call + ValidateInvokeRequest(ctx context.Context, parsedMethod interface{}, input map[string]interface{}, hasMessage bool) error // InvokeContract submits a new transaction to be executed by custom on-chain logic - InvokeContract(ctx context.Context, nsOpID, signingKey string, location *fftypes.JSONAny, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, options map[string]interface{}, batch *BatchPin) error + InvokeContract(ctx context.Context, nsOpID, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}, batch *BatchPin) (submissionRejected bool, err error) // QueryContract executes a method via custom on-chain logic and returns the result - QueryContract(ctx context.Context, signingKey string, location *fftypes.JSONAny, method *fftypes.FFIMethod, input map[string]interface{}, errors []*fftypes.FFIError, options map[string]interface{}) (interface{}, error) + QueryContract(ctx context.Context, signingKey string, location *fftypes.JSONAny, parsedMethod interface{}, input map[string]interface{}, options map[string]interface{}) (interface{}, error) // AddContractListener adds a new subscription to a user-specified contract and event AddContractListener(ctx context.Context, subscription *core.ContractListener) error @@ -138,25 +142,59 @@ const ( const FireFlyActionPrefix = "firefly:" +type EventType int + +const ( + EventTypeBatchPinComplete EventType = iota + EventTypeNetworkAction + EventTypeForListener +) + +// BatchPinComplete notifies on the arrival of a sequenced batch of messages, which might have been +// submitted by us, or by any other authorized party in the network. +type BatchPinCompleteEvent struct { + Namespace string + Batch *BatchPin + SigningKey *core.VerifierRef +} + +// BlockchainNetworkAction notifies on the arrival of a network operator action +type NetworkActionEvent struct { + Action string + Location *fftypes.JSONAny + Event *Event + SigningKey *core.VerifierRef +} + +// EventForListener notifies on the arrival of any event from a user-created listener. +type EventForListener struct { + *Event + // ListenerID is the ID assigned to a custom contract listener by the connector + ListenerID string +} + +// EventToDispatch is a wrapper around the other event types, to allow them to be dispatched as a group +type EventToDispatch struct { + Type EventType + BatchPinComplete *BatchPinCompleteEvent + NetworkAction *NetworkActionEvent + ForListener *EventForListener +} + // Callbacks is the interface provided to the blockchain plugin, to allow it to pass events back to firefly. // // Events must be delivered sequentially, such that event 2 is not delivered until the callback invoked for event 1 // has completed. However, it does not matter if these events are workload balance between the firefly core // cluster instances of the node. type Callbacks interface { - // BatchPinComplete notifies on the arrival of a sequenced batch of messages, which might have been - // submitted by us, or by any other authorized party in the network. 
- // - // Error should only be returned in shutdown scenarios - BatchPinComplete(namespace string, batch *BatchPin, signingKey *core.VerifierRef) error - - // BlockchainNetworkAction notifies on the arrival of a network operator action + // BlockchainEventBatch notifies of a sequential batch of blockchain events received. Batching allows efficiency + // by grouping commits at the database level when processing these events. // - // Error should only be returned in shutdown scenarios - BlockchainNetworkAction(action string, location *fftypes.JSONAny, event *Event, signingKey *core.VerifierRef) error - - // BlockchainEvent notifies on the arrival of any event from a user-created subscription. - BlockchainEvent(event *EventWithSubscription) error + // Errors are only returned in cases where the event appears valid, but a transient error has occurred that + // means FireFly core is unable to process the event batch right now, and the events should be pushed + // back to the connector for re-delivery. For example, because the server is shutting down, or the namespace + // is currently reloading. + BlockchainEventBatch(batch []*EventToDispatch) error } // Capabilities the supported featureset of the blockchain @@ -241,10 +279,3 @@ type Event struct { // Signature is the event signature, including the event name and output types Signature string } - -type EventWithSubscription struct { - Event - - // Subscription is the ID assigned to a custom contract subscription by the connector - Subscription string -} diff --git a/pkg/core/constants.go b/pkg/core/constants.go index 22e157148c..2c73d82bf3 100644 --- a/pkg/core/constants.go +++ b/pkg/core/constants.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -55,6 +55,7 @@ const ( // SystemTagIdentityClaim is the tag for messages that broadcast an identity claim SystemTagIdentityClaim = "ff_identity_claim" + //nolint:gosec // SystemTagIdentityVerification is the tag for messages that broadcast an identity verification SystemTagIdentityVerification = "ff_identity_verification" diff --git a/pkg/core/contracts.go b/pkg/core/contracts.go index 35ac340fd1..b9c1adb2b4 100644 --- a/pkg/core/contracts.go +++ b/pkg/core/contracts.go @@ -60,27 +60,34 @@ type ContractURLs struct { } type ContractAPI struct { - ID *fftypes.UUID `ffstruct:"ContractAPI" json:"id,omitempty" ffexcludeinput:"true"` - Namespace string `ffstruct:"ContractAPI" json:"namespace,omitempty" ffexcludeinput:"true"` - Interface *fftypes.FFIReference `ffstruct:"ContractAPI" json:"interface"` - Location *fftypes.JSONAny `ffstruct:"ContractAPI" json:"location,omitempty"` - Name string `ffstruct:"ContractAPI" json:"name"` - Message *fftypes.UUID `ffstruct:"ContractAPI" json:"message,omitempty" ffexcludeinput:"true"` - URLs ContractURLs `ffstruct:"ContractAPI" json:"urls" ffexcludeinput:"true"` + ID *fftypes.UUID `ffstruct:"ContractAPI" json:"id,omitempty" ffexcludeinput:"true"` + Namespace string `ffstruct:"ContractAPI" json:"namespace,omitempty" ffexcludeinput:"true"` + Interface *fftypes.FFIReference `ffstruct:"ContractAPI" json:"interface"` + Location *fftypes.JSONAny `ffstruct:"ContractAPI" json:"location,omitempty"` + Name string `ffstruct:"ContractAPI" json:"name"` + NetworkName string `ffstruct:"ContractAPI" json:"networkName,omitempty"` + Message *fftypes.UUID `ffstruct:"ContractAPI" json:"message,omitempty" ffexcludeinput:"true"` + URLs ContractURLs `ffstruct:"ContractAPI" json:"urls" ffexcludeinput:"true"` +
Published bool `ffstruct:"ContractAPI" json:"published" ffexcludeinput:"true"` } -func (c *ContractAPI) Validate(ctx context.Context, existing bool) (err error) { +func (c *ContractAPI) Validate(ctx context.Context) (err error) { if err = fftypes.ValidateFFNameField(ctx, c.Namespace, "namespace"); err != nil { return err } if err = fftypes.ValidateFFNameField(ctx, c.Name, "name"); err != nil { return err } + if c.NetworkName != "" { + if err = fftypes.ValidateFFNameField(ctx, c.NetworkName, "networkName"); err != nil { + return err + } + } return nil } func (c *ContractAPI) Topic() string { - return fftypes.TypeNamespaceNameTopicHash("contractapi", c.Namespace, c.Name) + return fftypes.TypeNamespaceNameTopicHash("contractapi", c.Namespace, c.NetworkName) } func (c *ContractAPI) SetBroadcastMessage(msgID *fftypes.UUID) { diff --git a/pkg/core/contracts_test.go b/pkg/core/contracts_test.go index b30949c455..829f1b9243 100644 --- a/pkg/core/contracts_test.go +++ b/pkg/core/contracts_test.go @@ -29,7 +29,7 @@ func TestValidateContractAPI(t *testing.T) { Namespace: "ns1", Name: "banana", } - err := api.Validate(context.Background(), false) + err := api.Validate(context.Background()) assert.NoError(t, err) } @@ -38,22 +38,31 @@ func TestValidateInvalidContractAPI(t *testing.T) { Namespace: "&%&^#()#", Name: "banana", } - err := api.Validate(context.Background(), false) + err := api.Validate(context.Background()) assert.Regexp(t, "FF00140", err) api = &ContractAPI{ Namespace: "ns1", Name: "(%&@!^%^)", } - err = api.Validate(context.Background(), false) + err = api.Validate(context.Background()) + assert.Regexp(t, "FF00140", err) + + api = &ContractAPI{ + Namespace: "ns1", + Name: "banana", + NetworkName: "(%&@!^%^)", + } + err = api.Validate(context.Background()) assert.Regexp(t, "FF00140", err) } func TestContractAPITopic(t *testing.T) { api := &ContractAPI{ - Namespace: "ns1", + Namespace: "ns1", + NetworkName: "banana", } - assert.Equal(t, "4cccc66c1f0eebcf578f1e63b73a2047d4eb4c84c0a00c69b0e00c7490403d20", api.Topic()) + assert.Equal(t, "a2f42ab7c9ef44ec08a103c565041b48b4cff4e4176e5faacfe8512c361e4f2c", api.Topic()) } func TestContractAPISetBroadCastMessage(t *testing.T) { diff --git a/pkg/core/definition.go b/pkg/core/definition.go index f56d002555..329e74993e 100644 --- a/pkg/core/definition.go +++ b/pkg/core/definition.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -25,3 +25,7 @@ type Definition interface { // SetBroadcastMessage sets the message that broadcast the definition SetBroadcastMessage(msgID *fftypes.UUID) } + +type DefinitionPublish struct { + NetworkName string `ffstruct:"DefinitionPublish" json:"networkName,omitempty"` +} diff --git a/pkg/core/event.go b/pkg/core/event.go index b96e2d7263..6f659be681 100644 --- a/pkg/core/event.go +++ b/pkg/core/event.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -99,6 +99,11 @@ type EventDelivery struct { Subscription SubscriptionRef `json:"subscription"` } +type CombinedEventDataDelivery struct { + Event *EventDelivery + Data DataArray +} + // EventDeliveryResponse is the payload an application sends back, to confirm it has accepted (or rejected) the event and as such // does not need to receive it again. 
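As a hypothetical illustration of the ContractAPI changes above: the broadcast topic is now derived from NetworkName rather than the local Name, so two members that publish the same API under different local names still converge on the same definition topic. The helper below is a sketch under that assumption, not part of this change.

package example

import (
	"context"
	"fmt"

	"github.com/hyperledger/firefly/pkg/core"
)

func describeContractAPI(ctx context.Context, api *core.ContractAPI) error {
	// NetworkName is optional on input, but when set it must pass the same
	// FireFly name validation as the local name.
	if err := api.Validate(ctx); err != nil {
		return err
	}
	// Topic() now hashes the namespace and NetworkName, so the local Name can
	// differ between members without splitting the broadcast topic.
	fmt.Printf("api %s (published=%t) broadcasts on topic %s\n",
		api.Name, api.Published, api.Topic())
	return nil
}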
type EventDeliveryResponse struct { diff --git a/pkg/core/message.go b/pkg/core/message.go index e18abc977f..269c4d51f4 100644 --- a/pkg/core/message.go +++ b/pkg/core/message.go @@ -98,6 +98,7 @@ type Message struct { TransactionID *fftypes.UUID `ffstruct:"Message" json:"txid,omitempty" ffexcludeinput:"true"` State MessageState `ffstruct:"Message" json:"state,omitempty" ffenum:"messagestate" ffexcludeinput:"true"` Confirmed *fftypes.FFTime `ffstruct:"Message" json:"confirmed,omitempty" ffexcludeinput:"true"` + RejectReason string `ffstruct:"Message" json:"rejectReason,omitempty" ffexcludeinput:"true"` Data DataRefs `ffstruct:"Message" json:"data" ffexcludeinput:"true"` Pins fftypes.FFStringArray `ffstruct:"Message" json:"pins,omitempty" ffexcludeinput:"true"` IdempotencyKey IdempotencyKey `ffstruct:"Message" json:"idempotencyKey,omitempty"` diff --git a/pkg/core/namespace.go b/pkg/core/namespace.go index b29ef32668..d121b40008 100644 --- a/pkg/core/namespace.go +++ b/pkg/core/namespace.go @@ -18,6 +18,7 @@ package core import ( "context" + "crypto/tls" "database/sql/driver" "encoding/json" @@ -28,11 +29,12 @@ import ( // Namespace is an isolated set of named resources, to allow multiple applications to co-exist in the same network, with the same named objects. // Can be used for use case segregation, or multi-tenancy. type Namespace struct { - Name string `ffstruct:"Namespace" json:"name"` - NetworkName string `ffstruct:"Namespace" json:"networkName"` - Description string `ffstruct:"Namespace" json:"description"` - Created *fftypes.FFTime `ffstruct:"Namespace" json:"created" ffexcludeinput:"true"` - Contracts *MultipartyContracts `ffstruct:"Namespace" json:"-"` + Name string `ffstruct:"Namespace" json:"name"` + NetworkName string `ffstruct:"Namespace" json:"networkName"` + Description string `ffstruct:"Namespace" json:"description"` + Created *fftypes.FFTime `ffstruct:"Namespace" json:"created" ffexcludeinput:"true"` + Contracts *MultipartyContracts `ffstruct:"Namespace" json:"-"` + TLSConfigs map[string]*tls.Config `ffstruct:"Namespace" json:"-" ffexcludeinput:"true"` } type NamespaceWithInitStatus struct { diff --git a/pkg/core/operation.go b/pkg/core/operation.go index e47b2a21c7..5d8d098b8a 100644 --- a/pkg/core/operation.go +++ b/pkg/core/operation.go @@ -144,6 +144,14 @@ type PreparedOperation struct { Data interface{} `json:"data"` } +type OpPhase int + +const ( + OpPhaseComplete OpPhase = iota + OpPhasePending + OpPhaseInitializing +) + func (po *PreparedOperation) NamespacedIDString() string { return po.Namespace + ":" + po.ID.String() } diff --git a/pkg/core/subscription.go b/pkg/core/subscription.go index 97e80e27fb..7af6fc85e7 100644 --- a/pkg/core/subscription.go +++ b/pkg/core/subscription.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -89,9 +89,11 @@ const ( // SubscriptionCoreOptions are the core options that apply across all transports type SubscriptionCoreOptions struct { - FirstEvent *SubOptsFirstEvent `ffstruct:"SubscriptionCoreOptions" json:"firstEvent,omitempty"` - ReadAhead *uint16 `ffstruct:"SubscriptionCoreOptions" json:"readAhead,omitempty"` - WithData *bool `ffstruct:"SubscriptionCoreOptions" json:"withData,omitempty"` + FirstEvent *SubOptsFirstEvent `ffstruct:"SubscriptionCoreOptions" json:"firstEvent,omitempty"` + ReadAhead *uint16 `ffstruct:"SubscriptionCoreOptions" json:"readAhead,omitempty"` + WithData *bool `ffstruct:"SubscriptionCoreOptions" json:"withData,omitempty"` + Batch *bool `ffstruct:"SubscriptionCoreOptions" json:"batch,omitempty"` + BatchTimeout *string `ffstruct:"SubscriptionCoreOptions" json:"batchTimeout,omitempty"` } // SubscriptionOptions customize the behavior of subscriptions @@ -137,6 +139,9 @@ func (so *SubscriptionOptions) UnmarshalJSON(b []byte) error { if err == nil { err = json.Unmarshal(b, &so.SubscriptionCoreOptions) } + if err == nil { + err = json.Unmarshal(b, &so.WebhookSubOptions) + } if err != nil { return err } @@ -159,6 +164,10 @@ func (so SubscriptionOptions) MarshalJSON() ([]byte, error) { if so.ReadAhead != nil { so.additionalOptions["readAhead"] = float64(*so.ReadAhead) } + if so.TLSConfigName != "" { + so.additionalOptions["tlsConfigName"] = so.TLSConfigName + } + return json.Marshal(&so.additionalOptions) } diff --git a/pkg/core/subscription_test.go b/pkg/core/subscription_test.go index fb202181d4..0165dc5939 100644 --- a/pkg/core/subscription_test.go +++ b/pkg/core/subscription_test.go @@ -35,6 +35,9 @@ func TestSubscriptionOptionsDatabaseSerialization(t *testing.T) { ReadAhead: &readAhead, WithData: &yes, }, + WebhookSubOptions: WebhookSubOptions{ + TLSConfigName: "myconfig", + }, }, Filter: SubscriptionFilter{}, } @@ -46,7 +49,7 @@ func TestSubscriptionOptionsDatabaseSerialization(t *testing.T) { // Verify it serializes as bytes to the database b1, err := sub1.Options.Value() assert.NoError(t, err) - assert.Equal(t, `{"firstEvent":"newest","my-nested-opts":{"myopt1":12345,"myopt2":"test"},"readAhead":50,"withData":true}`, string(b1.([]byte))) + assert.Equal(t, `{"firstEvent":"newest","my-nested-opts":{"myopt1":12345,"myopt2":"test"},"readAhead":50,"tlsConfigName":"myconfig","withData":true}`, string(b1.([]byte))) f1, err := sub1.Filter.Value() assert.NoError(t, err) @@ -61,6 +64,7 @@ func TestSubscriptionOptionsDatabaseSerialization(t *testing.T) { assert.NoError(t, err) assert.Equal(t, SubOptsFirstEventNewest, *sub2.Options.FirstEvent) assert.Equal(t, uint16(50), *sub2.Options.ReadAhead) + assert.Equal(t, "myconfig", sub2.Options.TLSConfigName) assert.Equal(t, string(b1.([]byte)), string(b2.([]byte))) // Confirm we don't pass core options, to transports diff --git a/pkg/core/tokenpool.go b/pkg/core/tokenpool.go index af91eddebc..d4d7dccbcb 100644 --- a/pkg/core/tokenpool.go +++ b/pkg/core/tokenpool.go @@ -29,16 +29,6 @@ var ( TokenTypeNonFungible = fftypes.FFEnumValue("tokentype", "nonfungible") ) -// TokenPoolState is the current confirmation state of a token pool -type TokenPoolState = fftypes.FFEnum - -var ( - // TokenPoolStatePending is a token pool that has been announced but not yet confirmed - TokenPoolStatePending = fftypes.FFEnumValue("tokenpoolstate", "pending") - // TokenPoolStateConfirmed is a token pool that has been confirmed on chain - TokenPoolStateConfirmed = fftypes.FFEnumValue("tokenpoolstate", 
"confirmed") -) - type TokenInterfaceFormat = fftypes.FFEnum var ( @@ -56,6 +46,7 @@ type TokenPool struct { Type TokenType `ffstruct:"TokenPool" json:"type" ffenum:"tokentype"` Namespace string `ffstruct:"TokenPool" json:"namespace,omitempty" ffexcludeinput:"true"` Name string `ffstruct:"TokenPool" json:"name,omitempty"` + NetworkName string `ffstruct:"TokenPool" json:"networkName,omitempty"` Standard string `ffstruct:"TokenPool" json:"standard,omitempty" ffexcludeinput:"true"` Locator string `ffstruct:"TokenPool" json:"locator,omitempty" ffexcludeinput:"true"` Key string `ffstruct:"TokenPool" json:"key,omitempty"` @@ -63,7 +54,7 @@ type TokenPool struct { Decimals int `ffstruct:"TokenPool" json:"decimals,omitempty" ffexcludeinput:"true"` Connector string `ffstruct:"TokenPool" json:"connector,omitempty"` Message *fftypes.UUID `ffstruct:"TokenPool" json:"message,omitempty" ffexcludeinput:"true"` - State TokenPoolState `ffstruct:"TokenPool" json:"state,omitempty" ffenum:"tokenpoolstate" ffexcludeinput:"true"` + Active bool `ffstruct:"TokenPool" json:"active" ffexcludeinput:"true"` Created *fftypes.FFTime `ffstruct:"TokenPool" json:"created,omitempty" ffexcludeinput:"true"` Config fftypes.JSONObject `ffstruct:"TokenPool" json:"config,omitempty" ffexcludeoutput:"true"` // for REST calls only (not stored) Info fftypes.JSONObject `ffstruct:"TokenPool" json:"info,omitempty" ffexcludeinput:"true"` @@ -71,9 +62,11 @@ type TokenPool struct { Interface *fftypes.FFIReference `ffstruct:"TokenPool" json:"interface,omitempty"` InterfaceFormat TokenInterfaceFormat `ffstruct:"TokenPool" json:"interfaceFormat,omitempty" ffenum:"tokeninterfaceformat" ffexcludeinput:"true"` Methods *fftypes.JSONAny `ffstruct:"TokenPool" json:"methods,omitempty" ffexcludeinput:"true"` + Published bool `ffstruct:"TokenPool" json:"published" ffexcludeinput:"true"` + PluginData string `ffstruct:"TokenPool" json:"-" ffexcludeinput:"true"` // reserved for internal plugin use (not returned on API) } -type TokenPoolAnnouncement struct { +type TokenPoolDefinition struct { Pool *TokenPool `json:"pool"` } @@ -81,13 +74,18 @@ func (t *TokenPool) Validate(ctx context.Context) (err error) { if err = fftypes.ValidateFFNameFieldNoUUID(ctx, t.Name, "name"); err != nil { return err } + if t.NetworkName != "" { + if err = fftypes.ValidateFFNameFieldNoUUID(ctx, t.NetworkName, "networkName"); err != nil { + return err + } + } return nil } -func (t *TokenPoolAnnouncement) Topic() string { - return fftypes.TypeNamespaceNameTopicHash("tokenpool", t.Pool.Namespace, t.Pool.Name) +func (t *TokenPoolDefinition) Topic() string { + return fftypes.TypeNamespaceNameTopicHash("tokenpool", t.Pool.Namespace, t.Pool.NetworkName) } -func (t *TokenPoolAnnouncement) SetBroadcastMessage(msgID *fftypes.UUID) { +func (t *TokenPoolDefinition) SetBroadcastMessage(msgID *fftypes.UUID) { t.Pool.Message = msgID } diff --git a/pkg/core/tokenpool_test.go b/pkg/core/tokenpool_test.go index 7acd506f1c..e2cafdceb9 100644 --- a/pkg/core/tokenpool_test.go +++ b/pkg/core/tokenpool_test.go @@ -32,6 +32,14 @@ func TestTokenPoolValidation(t *testing.T) { err := pool.Validate(context.Background()) assert.Regexp(t, "FF00140.*'name'", err) + pool = &TokenPool{ + Namespace: "ok", + Name: "ok", + NetworkName: "!wrong", + } + err = pool.Validate(context.Background()) + assert.Regexp(t, "FF00140.*'networkName'", err) + pool = &TokenPool{ Namespace: "ok", Name: "ok", @@ -42,10 +50,10 @@ func TestTokenPoolValidation(t *testing.T) { func TestTokenPoolDefinition(t *testing.T) { pool := 
&TokenPool{ - Namespace: "ok", - Name: "ok", + Namespace: "ok", + NetworkName: "ok", } - var def Definition = &TokenPoolAnnouncement{Pool: pool} + var def Definition = &TokenPoolDefinition{Pool: pool} assert.Equal(t, "73008386c5579b7015385528eb892f7773e13a20015c692f6b90b26e413fe8a4", def.Topic()) id := fftypes.NewUUID() diff --git a/pkg/core/verifier.go b/pkg/core/verifier.go index 5ddd85b52e..0a589474f9 100644 --- a/pkg/core/verifier.go +++ b/pkg/core/verifier.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -28,6 +28,8 @@ type VerifierType = fftypes.FFEnum var ( // VerifierTypeEthAddress is an Ethereum (secp256k1) address string VerifierTypeEthAddress = fftypes.FFEnumValue("verifiertype", "ethereum_address") + // VerifierTypeTezosAddress is a Tezos (ed25519) address string + VerifierTypeTezosAddress = fftypes.FFEnumValue("verifiertype", "tezos_address") // VerifierTypeMSPIdentity is the MSP id (X509 distinguished name) of an issued signing certificate / keypair VerifierTypeMSPIdentity = fftypes.FFEnumValue("verifiertype", "fabric_msp_id") // VerifierTypeFFDXPeerID is the peer identifier that FireFly Data Exchange verifies (using plugin specific tech) when receiving data diff --git a/pkg/core/webhooks.go b/pkg/core/webhooks.go index 79f6484511..9036519d65 100644 --- a/pkg/core/webhooks.go +++ b/pkg/core/webhooks.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -16,17 +16,45 @@ package core +import ( + "crypto/tls" + + "github.com/go-resty/resty/v2" +) + type WebhookSubOptions struct { - Fastack bool `ffstruct:"WebhookSubOptions" json:"fastack,omitempty"` - URL string `ffstruct:"WebhookSubOptions" json:"url,omitempty"` - Method string `ffstruct:"WebhookSubOptions" json:"method,omitempty"` - JSON bool `ffstruct:"WebhookSubOptions" json:"json,omitempty"` - Reply bool `ffstruct:"WebhookSubOptions" json:"reply,omitempty"` - ReplyTag string `ffstruct:"WebhookSubOptions" json:"replytag,omitempty"` - ReplyTX string `ffstruct:"WebhookSubOptions" json:"replytx,omitempty"` - Headers map[string]string `ffstruct:"WebhookSubOptions" json:"headers,omitempty"` - Query map[string]string `ffstruct:"WebhookSubOptions" json:"query,omitempty"` - Input WebhookInputOptions `ffstruct:"WebhookSubOptions" json:"input,omitempty"` + Fastack bool `ffstruct:"WebhookSubOptions" json:"fastack,omitempty"` + URL string `ffstruct:"WebhookSubOptions" json:"url,omitempty"` + Method string `ffstruct:"WebhookSubOptions" json:"method,omitempty"` + JSON bool `ffstruct:"WebhookSubOptions" json:"json,omitempty"` + Reply bool `ffstruct:"WebhookSubOptions" json:"reply,omitempty"` + ReplyTag string `ffstruct:"WebhookSubOptions" json:"replytag,omitempty"` + ReplyTX string `ffstruct:"WebhookSubOptions" json:"replytx,omitempty"` + Headers map[string]string `ffstruct:"WebhookSubOptions" json:"headers,omitempty"` + Query map[string]string `ffstruct:"WebhookSubOptions" json:"query,omitempty"` + TLSConfigName string `ffstruct:"WebhookSubOptions" json:"tlsConfigName,omitempty"` + TLSConfig *tls.Config `ffstruct:"WebhookSubOptions" json:"-" ffexcludeinput:"true"` + Input WebhookInputOptions `ffstruct:"WebhookSubOptions" json:"input,omitempty"` + Retry WebhookRetryOptions `ffstruct:"WebhookSubOptions" json:"retry,omitempty"` + HTTPOptions WebhookHTTPOptions `ffstruct:"WebhookSubOptions" json:"httpOptions,omitempty"` + RestyClient *resty.Client 
`ffstruct:"WebhookSubOptions" json:"-" ffexcludeinput:"true"` +} + +type WebhookRetryOptions struct { + Enabled bool `ffstruct:"WebhookRetryOptions" json:"enabled,omitempty"` + Count int `ffstruct:"WebhookRetryOptions" json:"count,omitempty"` + InitialDelay string `ffstruct:"WebhookRetryOptions" json:"initialDelay,omitempty"` + MaximumDelay string `ffstruct:"WebhookRetryOptions" json:"maxDelay,omitempty"` +} + +type WebhookHTTPOptions struct { + HTTPProxyURL *string `ffstruct:"WebhookHTTPOptions" json:"proxyURL,omitempty"` + HTTPTLSHandshakeTimeout string `ffstruct:"WebhookHTTPOptions" json:"tlsHandshakeTimeout,omitempty"` + HTTPRequestTimeout string `ffstruct:"WebhookHTTPOptions" json:"requestTimeout,omitempty"` + HTTPMaxIdleConns int `ffstruct:"WebhookHTTPOptions" json:"maxIdleConns,omitempty"` + HTTPIdleConnTimeout string `ffstruct:"WebhookHTTPOptions" json:"idleTimeout,omitempty"` + HTTPConnectionTimeout string `ffstruct:"WebhookHTTPOptions" json:"connectionTimeout,omitempty"` + HTTPExpectContinueTimeout string `ffstruct:"WebhookHTTPOptions" json:"expectContinueTimeout,omitempty"` } type WebhookInputOptions struct { diff --git a/pkg/database/plugin.go b/pkg/database/plugin.go index 4e643dac35..cd6fcac7fd 100644 --- a/pkg/database/plugin.go +++ b/pkg/database/plugin.go @@ -158,16 +158,19 @@ type iBatchCollection interface { type iTransactionCollection interface { // InsertTransaction - Insert a new transaction - InsertTransaction(ctx context.Context, data *core.Transaction) (err error) + InsertTransaction(ctx context.Context, txn *core.Transaction) (err error) + + // InsertTransactions performs a batch insert of transactions - returns error if idempotency keys clash while inserting the non-clashing ones, so caller can query to find the existing ones + InsertTransactions(ctx context.Context, txns []*core.Transaction) (err error) // UpdateTransaction - Update transaction UpdateTransaction(ctx context.Context, namespace string, id *fftypes.UUID, update ffapi.Update) (err error) // GetTransactionByID - Get a transaction by ID - GetTransactionByID(ctx context.Context, namespace string, id *fftypes.UUID) (message *core.Transaction, err error) + GetTransactionByID(ctx context.Context, namespace string, id *fftypes.UUID) (txn *core.Transaction, err error) // GetTransactions - Get transactions - GetTransactions(ctx context.Context, namespace string, filter ffapi.Filter) (message []*core.Transaction, res *ffapi.FilterResult, err error) + GetTransactions(ctx context.Context, namespace string, filter ffapi.Filter) (txn []*core.Transaction, res *ffapi.FilterResult, err error) } type iDatatypeCollection interface { @@ -219,6 +222,9 @@ type iOperationCollection interface { // InsertOperation - Insert an operation InsertOperation(ctx context.Context, operation *core.Operation, hooks ...PostCompletionHook) (err error) + // InsertOperations bulk insert operations - all must succeed/fail together (idempotency clashes are handled by containing transaction) + InsertOperations(ctx context.Context, ops []*core.Operation, hooks ...PostCompletionHook) (err error) + // UpdateOperation - Update an operation UpdateOperation(ctx context.Context, namespace string, id *fftypes.UUID, filter ffapi.Filter, update ffapi.Update) (updated bool, err error) @@ -352,8 +358,12 @@ type iBlobCollection interface { } type iTokenPoolCollection interface { + // InsertTokenPool - Insert a new token pool + // If a pool with the same name has already been recorded, does not insert but returns the existing row + 
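Looking back at the subscription and webhook option types above, a hypothetical sketch of how the new batch, retry, HTTP and TLS options compose on a subscription. Every value shown is an illustrative assumption; the field names come from the structs in this change.

package example

import (
	"github.com/hyperledger/firefly/pkg/core"
)

// buildWebhookSubOptions shows the new option fields composed together.
func buildWebhookSubOptions() core.SubscriptionOptions {
	batch := true
	batchTimeout := "500ms"
	readAhead := uint16(50)
	proxy := "http://proxy.example.com:8080"
	return core.SubscriptionOptions{
		SubscriptionCoreOptions: core.SubscriptionCoreOptions{
			ReadAhead:    &readAhead,
			Batch:        &batch,        // deliver events to the webhook in batches
			BatchTimeout: &batchTimeout, // flush a partial batch after this long
		},
		WebhookSubOptions: core.WebhookSubOptions{
			URL:           "https://example.com/hook",
			TLSConfigName: "myconfig", // resolved against the namespace's named TLS configs
			Retry: core.WebhookRetryOptions{
				Enabled:      true,
				Count:        5,
				InitialDelay: "250ms",
				MaximumDelay: "30s",
			},
			HTTPOptions: core.WebhookHTTPOptions{
				HTTPProxyURL:          &proxy,
				HTTPRequestTimeout:    "30s",
				HTTPConnectionTimeout: "10s",
			},
		},
	}
}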
InsertOrGetTokenPool(ctx context.Context, pool *core.TokenPool) (existing *core.TokenPool, err error) + // UpsertTokenPool - Upsert a token pool - UpsertTokenPool(ctx context.Context, pool *core.TokenPool) error + UpsertTokenPool(ctx context.Context, pool *core.TokenPool, optimization UpsertOptimization) error // GetTokenPool - Get a token pool by name GetTokenPool(ctx context.Context, namespace, name string) (*core.TokenPool, error) @@ -361,11 +371,14 @@ type iTokenPoolCollection interface { // GetTokenPoolByID - Get a token pool by pool ID GetTokenPoolByID(ctx context.Context, namespace string, id *fftypes.UUID) (*core.TokenPool, error) - // GetTokenPoolByLocator - Get a token pool by locator - GetTokenPoolByLocator(ctx context.Context, namespace, connector, locator string) (*core.TokenPool, error) + // GetTokenPoolByNetworkName - Get a token pool by network name + GetTokenPoolByNetworkName(ctx context.Context, namespace, networkName string) (*core.TokenPool, error) // GetTokenPools - Get token pools GetTokenPools(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.TokenPool, *ffapi.FilterResult, error) + + // DeleteTokenPool - delete a token pool + DeleteTokenPool(ctx context.Context, namespace string, id *fftypes.UUID) error } type iTokenBalanceCollection interface { @@ -383,20 +396,27 @@ type iTokenBalanceCollection interface { // GetTokenAccountPools - Get the list of pools referenced by a given account GetTokenAccountPools(ctx context.Context, namespace, key string, filter ffapi.Filter) ([]*core.TokenAccountPool, *ffapi.FilterResult, error) + + // DeleteTokenBalances - Delete token balances from a particular pool + DeleteTokenBalances(ctx context.Context, namespace string, poolID *fftypes.UUID) error } type iTokenTransferCollection interface { - // UpsertTokenTransfer - Upsert a token transfer - UpsertTokenTransfer(ctx context.Context, transfer *core.TokenTransfer) error + // InsertOrGetTokenTransfer - insert a token transfer event from the blockchain + // If the ProtocolID has already been recorded, it does not insert but returns the existing row + InsertOrGetTokenTransfer(ctx context.Context, approval *core.TokenTransfer) (existing *core.TokenTransfer, err error) // GetTokenTransferByID - Get a token transfer by ID GetTokenTransferByID(ctx context.Context, namespace string, localID *fftypes.UUID) (*core.TokenTransfer, error) // GetTokenTransferByProtocolID - Get a token transfer by protocol ID - GetTokenTransferByProtocolID(ctx context.Context, namespace, connector, protocolID string) (*core.TokenTransfer, error) + GetTokenTransferByProtocolID(ctx context.Context, namespace string, poolID *fftypes.UUID, protocolID string) (*core.TokenTransfer, error) // GetTokenTransfers - Get token transfers GetTokenTransfers(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.TokenTransfer, *ffapi.FilterResult, error) + + // DeleteTokenTransfers - Delete token transfers from a particular pool + DeleteTokenTransfers(ctx context.Context, namespace string, poolID *fftypes.UUID) error } type iTokenApprovalCollection interface { @@ -410,15 +430,22 @@ type iTokenApprovalCollection interface { GetTokenApprovalByID(ctx context.Context, namespace string, localID *fftypes.UUID) (*core.TokenApproval, error) // GetTokenTransferByProtocolID - Get a token approval by protocol ID - GetTokenApprovalByProtocolID(ctx context.Context, namespace, connector, protocolID string) (*core.TokenApproval, error) + GetTokenApprovalByProtocolID(ctx context.Context, namespace string, poolID 
*fftypes.UUID, protocolID string) (*core.TokenApproval, error) // GetTokenApprovals - Get token approvals GetTokenApprovals(ctx context.Context, namespace string, filter ffapi.Filter) ([]*core.TokenApproval, *ffapi.FilterResult, error) + + // DeleteTokenApprovals - Delete token approvals from a particular pool + DeleteTokenApprovals(ctx context.Context, namespace string, poolID *fftypes.UUID) error } type iFFICollection interface { + // InsertOrGetFFI - Insert an FFI + // If an FFI with the same name has already been recorded, does not insert but returns the existing row + InsertOrGetFFI(ctx context.Context, ffi *fftypes.FFI) (*fftypes.FFI, error) + // UpsertFFI - Upsert an FFI - UpsertFFI(ctx context.Context, cd *fftypes.FFI) error + UpsertFFI(ctx context.Context, ffi *fftypes.FFI, optimization UpsertOptimization) error // GetFFIs - Get FFIs GetFFIs(ctx context.Context, namespace string, filter ffapi.Filter) ([]*fftypes.FFI, *ffapi.FilterResult, error) @@ -428,6 +455,12 @@ type iFFICollection interface { // GetFFI - Get an FFI by name and version GetFFI(ctx context.Context, namespace, name, version string) (*fftypes.FFI, error) + + // GetFFIByNetworkName - Get an FFI by network name and version + GetFFIByNetworkName(ctx context.Context, namespace, networkName, version string) (*fftypes.FFI, error) + + // DeleteFFI - Delete an FFI + DeleteFFI(ctx context.Context, namespace string, id *fftypes.UUID) error } type iFFIMethodCollection interface { @@ -461,8 +494,12 @@ type iFFIErrorCollection interface { } type iContractAPICollection interface { + // InsertOrGetContractAPI - Insert a contract API + // If an API with the same name has already been recorded, does not insert but returns the existing row + InsertOrGetContractAPI(ctx context.Context, api *core.ContractAPI) (*core.ContractAPI, error) + // UpsertFFIEvent - Upsert a contract API - UpsertContractAPI(ctx context.Context, cd *core.ContractAPI) error + UpsertContractAPI(ctx context.Context, api *core.ContractAPI, optimization UpsertOptimization) error // GetContractAPIs - Get contract APIs GetContractAPIs(ctx context.Context, namespace string, filter ffapi.AndFilter) ([]*core.ContractAPI, *ffapi.FilterResult, error) @@ -472,6 +509,12 @@ type iContractAPICollection interface { // GetContractAPIByName - Get a contract API by name GetContractAPIByName(ctx context.Context, namespace, name string) (*core.ContractAPI, error) + + // GetContractAPIByNetworkName - Get a contract API by network name + GetContractAPIByNetworkName(ctx context.Context, namespace, networkName string) (*core.ContractAPI, error) + + // DeleteContractAPI - Delete a contract API + DeleteContractAPI(ctx context.Context, namespace string, id *fftypes.UUID) error } type iContractListenerCollection interface { @@ -498,6 +541,10 @@ type iContractListenerCollection interface { } type iBlockchainEventCollection interface { + + // InsertBlockchainEvents performs a batch insert of blockchain events - fails if they already exist, so caller can fall back to InsertOrGetBlockchainEvent individually + InsertBlockchainEvents(ctx context.Context, messages []*core.BlockchainEvent, hooks ...PostCompletionHook) (err error) + // InsertOrGetBlockchainEvent - insert an event from the blockchain // If the ProtocolID has already been recorded, it does not insert but returns the existing row InsertOrGetBlockchainEvent(ctx context.Context, event *core.BlockchainEvent) (existing *core.BlockchainEvent, err error) @@ -705,6 +752,7 @@ var MessageQueryFactory = &ffapi.QueryFields{ "pins": 
&ffapi.FFStringArrayField{}, "state": &ffapi.StringField{}, "confirmed": &ffapi.TimeField{}, + "rejectreason": &ffapi.StringField{}, "sequence": &ffapi.Int64Field{}, "txtype": &ffapi.StringField{}, "batch": &ffapi.UUIDField{}, @@ -883,18 +931,20 @@ var TokenPoolQueryFactory = &ffapi.QueryFields{ "id": &ffapi.UUIDField{}, "type": &ffapi.StringField{}, "name": &ffapi.StringField{}, + "networkname": &ffapi.StringField{}, "standard": &ffapi.StringField{}, "locator": &ffapi.StringField{}, "symbol": &ffapi.StringField{}, "decimals": &ffapi.Int64Field{}, "message": &ffapi.UUIDField{}, - "state": &ffapi.StringField{}, + "active": &ffapi.BoolField{}, "created": &ffapi.TimeField{}, "connector": &ffapi.StringField{}, "tx.type": &ffapi.StringField{}, "tx.id": &ffapi.UUIDField{}, "interface": &ffapi.UUIDField{}, "interfaceformat": &ffapi.StringField{}, + "published": &ffapi.BoolField{}, } // TokenBalanceQueryFactory filter fields for token balances @@ -961,9 +1011,11 @@ var TokenApprovalQueryFactory = &ffapi.QueryFields{ // FFIQueryFactory filter fields for contract definitions var FFIQueryFactory = &ffapi.QueryFields{ - "id": &ffapi.UUIDField{}, - "name": &ffapi.StringField{}, - "version": &ffapi.StringField{}, + "id": &ffapi.UUIDField{}, + "name": &ffapi.StringField{}, + "networkname": &ffapi.StringField{}, + "version": &ffapi.StringField{}, + "published": &ffapi.BoolField{}, } // FFIMethodQueryFactory filter fields for contract methods @@ -1022,7 +1074,9 @@ var BlockchainEventQueryFactory = &ffapi.QueryFields{ // ContractAPIQueryFactory filter fields for Contract APIs var ContractAPIQueryFactory = &ffapi.QueryFields{ - "id": &ffapi.UUIDField{}, - "name": &ffapi.StringField{}, - "interface": &ffapi.UUIDField{}, + "id": &ffapi.UUIDField{}, + "name": &ffapi.StringField{}, + "networkname": &ffapi.StringField{}, + "interface": &ffapi.UUIDField{}, + "published": &ffapi.BoolField{}, } diff --git a/pkg/events/plugin.go b/pkg/events/plugin.go index b229eb348b..03098c5d2a 100644 --- a/pkg/events/plugin.go +++ b/pkg/events/plugin.go @@ -45,11 +45,15 @@ type Plugin interface { // ValidateOptions verifies a set of input options, prior to storage of a new subscription // The plugin can modify the core subscription options, such as overriding whether data is delivered. - ValidateOptions(options *core.SubscriptionOptions) error + ValidateOptions(ctx context.Context, options *core.SubscriptionOptions) error // DeliveryRequest requests delivery of work on a connection, which must later be responded to // Data will only be supplied as non-nil if the subscription is set to include data - DeliveryRequest(connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error + DeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error + + // DeliveryBatchRequest requests delivery of multiple events on a connection, which must later be responded to + // Data will only be supplied as non-nil if the subscription is set to include data + BatchDeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, events []*core.CombinedEventDataDelivery) error // NamespaceRestarted is called after a namespace restarts. For a connect-in style plugin, like // WebSockets, this must re-register any active connections that started before the time passed in. 
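A hedged sketch of how an event transport might satisfy the new BatchDeliveryRequest method by fanning each batch entry out to a single-event delivery function. The transport type and its deliverOne field are illustrative assumptions; a real transport would normally frame the whole batch into one payload and advertise support via the Capabilities change just below.

package example

import (
	"context"

	"github.com/hyperledger/firefly/pkg/core"
)

// exampleTransport is a hypothetical events transport; only the batch path is shown.
type exampleTransport struct {
	deliverOne func(ctx context.Context, connID string, sub *core.Subscription, event *core.EventDelivery, data core.DataArray) error
}

// BatchDeliveryRequest fans the batch out to the single-event path, pairing
// each EventDelivery with the data resolved for it by FireFly core.
func (t *exampleTransport) BatchDeliveryRequest(ctx context.Context, connID string, sub *core.Subscription, events []*core.CombinedEventDataDelivery) error {
	for _, combined := range events {
		if err := t.deliverOne(ctx, connID, sub, combined.Event, combined.Data); err != nil {
			return err
		}
	}
	return nil
}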
@@ -83,4 +87,6 @@ type Callbacks interface { DeliveryResponse(connID string, inflight *core.EventDeliveryResponse) } -type Capabilities struct{} +type Capabilities struct { + BatchDelivery bool +} diff --git a/pkg/tokens/plugin.go b/pkg/tokens/plugin.go index c11c476c78..328f62c20e 100644 --- a/pkg/tokens/plugin.go +++ b/pkg/tokens/plugin.go @@ -50,10 +50,13 @@ type Plugin interface { Capabilities() *Capabilities // CreateTokenPool creates a new (fungible or non-fungible) pool of tokens - CreateTokenPool(ctx context.Context, nsOpID string, pool *core.TokenPool) (complete bool, err error) + CreateTokenPool(ctx context.Context, nsOpID string, pool *core.TokenPool) (phase core.OpPhase, err error) // ActivateTokenPool activates a pool in order to begin receiving events - ActivateTokenPool(ctx context.Context, nsOpID string, pool *core.TokenPool) (complete bool, err error) + ActivateTokenPool(ctx context.Context, pool *core.TokenPool) (phase core.OpPhase, err error) + + // DeactivateTokenPool deactivates a pool in order to stop receiving events and remove underlying listeners + DeactivateTokenPool(ctx context.Context, pool *core.TokenPool) error // CheckInterface checks which methods of a contract interface are supported by this connector CheckInterface(ctx context.Context, pool *core.TokenPool, methods []*fftypes.FFIMethod) (*fftypes.JSONAny, error) @@ -107,7 +110,10 @@ type TokenPool struct { // Type is the type of tokens (fungible, non-fungible, etc) in this pool Type core.TokenType - // PoolLocator is the ID assigned to this pool by the connector (must be unique for this connector) + // ID is the ID assigned to this pool by FireFly (if known) + ID *fftypes.UUID + + // PoolLocator is the identifier assigned to this pool by the token connector (includes the contract address or other location info) PoolLocator string // TX is the FireFly-assigned information to correlate this to a transaction (optional) @@ -131,6 +137,9 @@ type TokenPool struct { // Info is any other connector-specific info on the pool that may be worth saving (optional) Info fftypes.JSONObject + // PluginData is any other data that the plugin would like to save with the pool (optional) + PluginData string + // Event contains info on the underlying blockchain event for this pool creation Event *blockchain.Event } @@ -145,7 +154,6 @@ type TokenPoolMethods struct { type TokenTransfer struct { // Although not every field will be filled in, embed core.TokenTransfer to avoid duplicating lots of fields - // Notable fields NOT expected to be populated by plugins: Namespace, LocalID, Pool core.TokenTransfer // PoolLocator is the ID assigned to the token pool by the connector @@ -156,6 +164,7 @@ type TokenTransfer struct { } type TokenApproval struct { + // Although not every field will be filled in, embed core.TokenApproval to avoid duplicating lots of fields core.TokenApproval // PoolLocator is the ID assigned to the token pool by the connector diff --git a/smart_contracts/fabric/custompin-sample/go.mod b/smart_contracts/fabric/custompin-sample/go.mod index 5f65fa1239..ee85cb430a 100644 --- a/smart_contracts/fabric/custompin-sample/go.mod +++ b/smart_contracts/fabric/custompin-sample/go.mod @@ -1,6 +1,6 @@ module github.com/hyperledger/firefly/custompin_sample -go 1.18 +go 1.21 require ( github.com/hyperledger/fabric-chaincode-go v0.0.0-20210718160520-38d29fabecb9 diff --git a/smart_contracts/fabric/firefly-go/Dockerfile b/smart_contracts/fabric/firefly-go/Dockerfile index 92bbc19441..ff5b3391e2 100644 ---
a/smart_contracts/fabric/firefly-go/Dockerfile +++ b/smart_contracts/fabric/firefly-go/Dockerfile @@ -1,8 +1,9 @@ -FROM golang:1.18 +FROM golang:1.21 WORKDIR /app COPY firefly.go go.mod go.sum ./ COPY chaincode/ ./chaincode/ +COPY batchpin/ ./batchpin/ RUN ls -la ./ \ && GO111MODULE=on GOOS=linux CGO_ENABLED=0 go build -o firefly.bin firefly.go diff --git a/smart_contracts/fabric/firefly-go/go.mod b/smart_contracts/fabric/firefly-go/go.mod index 7c7d520ab7..b6807b477e 100644 --- a/smart_contracts/fabric/firefly-go/go.mod +++ b/smart_contracts/fabric/firefly-go/go.mod @@ -1,6 +1,6 @@ module github.com/hyperledger/firefly/chaincode-go -go 1.18 +go 1.21 require ( github.com/golang/protobuf v1.4.3 diff --git a/test/data/contracts/coupon/coupon.json b/test/data/contracts/coupon/coupon.json new file mode 100644 index 0000000000..f2ea3be5e7 --- /dev/null +++ b/test/data/contracts/coupon/coupon.json @@ -0,0 +1,812 @@ +{ + "contracts": { + "coupon.sol:Coupon": { + "abi": [ + { + "inputs": [ + { + "internalType": "address", + "name": "_ad", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "AccessDenied", + "type": "error" + }, + { + "inputs": [], + "name": "AlreadyRedeemed", + "type": "error" + }, + { + "inputs": [], + "name": "CheckTheDateOfCoupon", + "type": "error" + }, + { + "inputs": [], + "name": "CouponDoesNotExist", + "type": "error" + }, + { + "inputs": [], + "name": "IdIsAlreadyTake", + "type": "error" + }, + { + "inputs": [], + "name": "IdIsNotYetCreated", + "type": "error" + }, + { + "inputs": [], + "name": "NotAnAdmin", + "type": "error" + }, + { + "inputs": [], + "name": "URIQueryForNonexistentToken", + "type": "error" + }, + { + "inputs": [], + "name": "idIsNotCreatedByAdmin", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "approved", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "operator", + "type": "address" + }, + { + "indexed": false, + "internalType": "bool", + "name": "approved", + "type": "bool" + } + ], + "name": "ApprovalForAll", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "_tokenId", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "string", + "name": "_ifpsURL", + "type": "string" + }, + { + "indexed": true, + "internalType": "string", + "name": "_status", + "type": "string" + } + ], + "name": "CouponCreated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "_whoUpdated", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "_couponId", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "_newEndDate", + "type": "uint256" + } + ], + "name": "EndDateUpdate", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", 
+ "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferred", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "_ad", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "_couponId", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "_tokenId", + "type": "uint256" + } + ], + "name": "Redeemed", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_ad", + "type": "address" + } + ], + "name": "addAdmin", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "allAdminAddresses", + "outputs": [ + { + "internalType": "address[]", + "name": "allAdminAddress", + "type": "address[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "allCouponIds", + "outputs": [ + { + "internalType": "uint256[]", + "name": "allCreatedIds", + "type": "uint256[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_id", + "type": "uint256" + } + ], + "name": "couponAndRelatedTokenIds", + "outputs": [ + { + "internalType": "uint256[]", + "name": "couponRelatedIds", + "type": "uint256[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_id", + "type": "uint256" + } + ], + "name": "couponCreatedOrNotCreatedStatus", + "outputs": [ + { + "internalType": "bool", + "name": "status", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_ad", + "type": "address" + } + ], + "name": "couponsPerAddress", + "outputs": [ + { + "internalType": "uint256[]", + "name": "allCouponIdsOfUsers", + "type": "uint256[]" + }, + { + "internalType": "uint256", + "name": "totalCoupons", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_id", + "type": "uint256" + }, + { + "internalType": "string", + "name": "_ipfsUrl", + "type": "string" + }, + { + "internalType": "uint256", + "name": "_start", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_end", + "type": "uint256" + } + ], + "name": "createCoupon", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_id", + "type": "uint256" + }, + { + "internalType": 
"uint256", + "name": "_updateEndDate", + "type": "uint256" + } + ], + "name": "editEndDate", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "getApproved", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "operator", + "type": "address" + } + ], + "name": "isApprovedForAll", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "ownerOf", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_driverAddress", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_id", + "type": "uint256" + } + ], + "name": "redeemCoupon", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_id", + "type": "uint256" + }, + { + "internalType": "address[]", + "name": "_allAddress", + "type": "address[]" + } + ], + "name": "removeCouponLinkToUser", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "renounceOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "safeTransferFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "name": "safeTransferFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "operator", + "type": "address" + }, + { + "internalType": "bool", + "name": "approved", + "type": "bool" + } + ], + "name": "setApprovalForAll", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes4", + "name": "interfaceId", + "type": "bytes4" + } + ], + "name": "supportsInterface", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + 
"internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "tokenURI", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalCouponsCount", + "outputs": [ + { + "internalType": "uint256", + "name": "totalCount", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "viewAllCreatedTokens", + "outputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "_tokenId", + "type": "uint256" + }, + { + "internalType": "string", + "name": "_tokenURL", + "type": "string" + }, + { + "internalType": "uint256", + "name": "_startDate", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_endDate", + "type": "uint256" + } + ], + "internalType": "struct Coupon.tokenSpecification[]", + "name": "tokenspec", + "type": "tuple[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_couponId", + "type": "uint256" + } + ], + "name": "viewCouponStatus", + "outputs": [ + { + "internalType": "bool", + "name": "status", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_ad", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_couponId", + "type": "uint256" + } + ], + "name": "viewCouponValidityToUser", + "outputs": [ + { + "internalType": "uint256", + "name": "couponId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "startDate", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "endDate", + "type": "uint256" + }, + { + "internalType": "string", + "name": "couponStatus", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + } + ], + "bin": 
"60c0604052600e60809081526d436f75706f6e206973204c69766560901b60a052600b906200002f908262000288565b5060408051808201909152600e81526d10dbdd5c1bdb88115e1c1a5c995960921b6020820152600c9062000064908262000288565b503480156200007257600080fd5b5060405162002ab038038062002ab0833981016040819052620000959162000354565b6040518060400160405280601a81526020017f4469676974616c2043726564656e7469616c20436f75706f6e7300000000000081525060405180604001604052806002815260200161444360f01b8152508160009081620000f7919062000288565b50600162000106828262000288565b505050620001236200011d6200018d60201b60201c565b62000191565b6001600160a01b03166000818152600d60205260408120805460ff191660019081179091556007805491820181559091527fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c6880180546001600160a01b031916909117905562000386565b3390565b600680546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b634e487b7160e01b600052604160045260246000fd5b600181811c908216806200020e57607f821691505b6020821081036200022f57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200028357600081815260208120601f850160051c810160208610156200025e5750805b601f850160051c820191505b818110156200027f578281556001016200026a565b5050505b505050565b81516001600160401b03811115620002a457620002a4620001e3565b620002bc81620002b58454620001f9565b8462000235565b602080601f831160018114620002f45760008415620002db5750858301515b600019600386901b1c1916600185901b1785556200027f565b600085815260208120601f198616915b82811015620003255788860151825594840194600190910190840162000304565b5085821015620003445787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b6000602082840312156200036757600080fd5b81516001600160a01b03811681146200037f57600080fd5b9392505050565b61271a80620003966000396000f3fe608060405234801561001057600080fd5b50600436106101da5760003560e01c80638da5cb5b11610104578063c63a47ab116100a2578063d5dd026211610071578063d5dd026214610426578063e985e9c51461042e578063f2fde38b14610441578063fdcc12ec1461045457600080fd5b8063c63a47ab146103d7578063c87b56dd146103f8578063c9436dca1461040b578063cdbedd291461041e57600080fd5b8063a7ba18d3116100de578063a7ba18d31461037b578063adb075bb1461038e578063b88d4fde146103b1578063be0772cd146103c457600080fd5b80638da5cb5b1461034f57806395d89b4114610360578063a22cb4651461036857600080fd5b806342842e0e1161017c5780636af465211161014b5780636af4652114610300578063704802751461031357806370a0823114610326578063715018a61461034757600080fd5b806342842e0e146102a557806343607ef9146102b85780636229352d146102d85780636352211e146102ed57600080fd5b8063095ea7b3116101b8578063095ea7b31461024757806323b872dd1461025c5780632a505ecd1461026f57806330635cd31461029257600080fd5b806301ffc9a7146101df57806306fdde0314610207578063081812fc1461021c575b600080fd5b6101f26101ed366004611d78565b610469565b60405190151581526020015b60405180910390f35b61020f6104bb565b6040516101fe9190611de5565b61022f61022a366004611df8565b61054d565b6040516001600160a01b0390911681526020016101fe565b61025a610255366004611e28565b610574565b005b61025a61026a366004611e52565b61068e565b6101f261027d366004611df8565b6000908152600e602052604090205460ff1690565b61025a6102a0366004611e28565b6106bf565b61025a6102b3366004611e52565b610925565b6102cb6102c6366004611df8565b610940565b6040516101fe9190611ec9565b6102e06109a2565b6040516101fe9190611edc565b61022f6102fb366004611df8565b610c0d565b6101f261030e366004611df8565b610c6d565b61025a610321366004611f67565b610cb7565b610339610334366004611f67565b610d66565b6040519081526020016101fe565b61025a610dec565b6006546001600160a01b03
1661022f565b61020f610e00565b61025a610376366004611f82565b610e0f565b61025a610389366004612005565b610e1e565b6103a161039c366004611e28565b610edc565b6040516101fe94939291906120be565b61025a6103bf366004612145565b61100f565b61025a6103d23660046121c1565b611047565b6103ea6103e5366004611f67565b61117b565b6040516101fe9291906121e3565b61020f610406366004611df8565b6111f9565b61025a610419366004612205565b61126d565b6102cb6113cf565b600854610339565b6101f261043c366004612270565b611426565b61025a61044f366004611f67565b611454565b61045c6114cd565b6040516101fe91906122a3565b60006001600160e01b031982166380ac58cd60e01b148061049a57506001600160e01b03198216635b5e139f60e01b145b806104b557506301ffc9a760e01b6001600160e01b03198316145b92915050565b6060600080546104ca906122f0565b80601f01602080910402602001604051908101604052809291908181526020018280546104f6906122f0565b80156105435780601f1061051857610100808354040283529160200191610543565b820191906000526020600020905b81548152906001019060200180831161052657829003601f168201915b5050505050905090565b60006105588261152e565b506000908152600460205260409020546001600160a01b031690565b600061057f82610c0d565b9050806001600160a01b0316836001600160a01b0316036105f15760405162461bcd60e51b815260206004820152602160248201527f4552433732313a20617070726f76616c20746f2063757272656e74206f776e656044820152603960f91b60648201526084015b60405180910390fd5b336001600160a01b038216148061060d575061060d8133611426565b61067f5760405162461bcd60e51b815260206004820152603d60248201527f4552433732313a20617070726f76652063616c6c6572206973206e6f7420746f60448201527f6b656e206f776e6572206f7220617070726f76656420666f7220616c6c00000060648201526084016105e8565b610689838361158d565b505050565b61069833826115fb565b6106b45760405162461bcd60e51b81526004016105e890612324565b61068983838361165a565b6006546001600160a01b031633148015906106ea5750336000908152600d602052604090205460ff16155b15610708576040516355098f2760e01b815260040160405180910390fd5b6001600a600082825461071b9190612387565b9091555050600a5461072d9082612387565b6009556000818152600e602052604090205460ff1661075f5760405163d64927f160e01b815260040160405180910390fd5b6000818152600f60205260409020600201544210801561078f57506000818152600f602052604090206003015442115b156107ad5760405163067149c160e11b815260040160405180910390fd5b6001600160a01b038216600090815260116020908152604080832084845290915290205460ff16156107f2576040516306d3830f60e21b815260040160405180910390fd5b6107fe826009546117be565b6000818152600f6020908152604080832080546001600160a01b0387168552601090935292209081556001908101916108389101826123e8565b506000818152600f602090815260408083206002808201546001600160a01b0388168652601085528386209182015560039182015491015560129091528120805460019290610888908490612387565b90915550506001600160a01b038216600081815260146020908152604080832080546001818101835591855283852001869055848452601183528184208685528352818420805460ff19168217905560158352818420600980548254938401835591865293852090910155905490519092849290917ff3a670cd3af7d64b488926880889d08a8585a138ff455227af6737339a1ec2629190a45050565b6106898383836040518060200160405280600081525061100f565b60008181526015602090815260409182902080548351818402810184019094528084526060939283018282801561099657602002820191906000526020600020905b815481526020019060010190808311610982575b50505050509050919050565b60085460609060009067ffffffffffffffff8111156109c3576109c3611fbe565b604051908082528060200260200182016040528015610a1f57816020015b610a0c6040518060800160405280600081526020016060815260200160008152602001600081525090565b8152602001906001900390816109e15790505b50905060005b600854811015610c0757610a5a604051806080016040528060008152602001606081526020016
0008152602001600081525090565b600f600060088481548110610a7157610a716124c9565b9060005260206000200154815260200190815260200160002060000154816000018181525050600f600060088481548110610aae57610aae6124c9565b906000526020600020015481526020019081526020016000206001018054610ad5906122f0565b80601f0160208091040260200160405190810160405280929190818152602001828054610b01906122f0565b8015610b4e5780601f10610b2357610100808354040283529160200191610b4e565b820191906000526020600020905b815481529060010190602001808311610b3157829003601f168201915b50505050508160200181905250600f600060088481548110610b7257610b726124c9565b9060005260206000200154815260200190815260200160002060020154816040018181525050600f600060088481548110610baf57610baf6124c9565b906000526020600020015481526020019081526020016000206003015481606001818152505080838381518110610be857610be86124c9565b6020026020010181905250508080610bff906124df565b915050610a25565b50919050565b6000818152600260205260408120546001600160a01b0316806104b55760405162461bcd60e51b8152602060048201526018602482015277115490cdcc8c4e881a5b9d985b1a59081d1bdad95b88125160421b60448201526064016105e8565b6000818152600f602052604081206002015442118015610c9d57506000828152600f602052604090206003015442105b15610caa57506001919050565b506000919050565b919050565b6006546001600160a01b03163314801590610ce25750336000908152600d602052604090205460ff16155b15610d00576040516355098f2760e01b815260040160405180910390fd5b6001600160a01b03166000818152600d60205260408120805460ff191660019081179091556007805491820181559091527fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c6880180546001600160a01b0319169091179055565b60006001600160a01b038216610dd05760405162461bcd60e51b815260206004820152602960248201527f4552433732313a2061646472657373207a65726f206973206e6f7420612076616044820152683634b21037bbb732b960b91b60648201526084016105e8565b506001600160a01b031660009081526003602052604090205490565b610df4611949565b610dfe60006119a3565b565b6060600180546104ca906122f0565b610e1a3383836119f5565b5050565b6006546001600160a01b03163314801590610e495750336000908152600d602052604090205460ff16155b15610e67576040516355098f2760e01b815260040160405180910390fd5b60005b815181101561068957600060116000848481518110610e8b57610e8b6124c9565b6020908102919091018101516001600160a01b0316825281810192909252604090810160009081208782529092529020805460ff191691151591909117905580610ed4816124df565b915050610e6a565b6001600160a01b03821660009081526011602090815260408083208484529091528120548190819060609060ff168015610f1a5750610f1a85610c6d565b15610fd9576000858152600f6020526040902080546002820154600390920154600b80549293928190610f4c906122f0565b80601f0160208091040260200160405190810160405280929190818152602001828054610f78906122f0565b8015610fc55780601f10610f9a57610100808354040283529160200191610fc5565b820191906000526020600020905b815481529060010190602001808311610fa857829003601f168201915b505050505090509350935093509350611006565b6000858152600f6020526040902080546002820154600390920154600c80549293928190610f4c906122f0565b92959194509250565b61101933836115fb565b6110355760405162461bcd60e51b81526004016105e890612324565b61104184848484611ac3565b50505050565b6006546001600160a01b031633148015906110725750336000908152600d602052604090205460ff16155b15611090576040516355098f2760e01b815260040160405180910390fd5b6000828152600e602052604090205460ff166110bf57604051631c5fa2d760e11b815260040160405180910390fd5b6000828152600f602052604090206003015481116111385760405162461bcd60e51b815260206004820152603060248201527f656e746572206461746520746861742069732067726561746572207468616e2060448201526f63757272656e7420656e64206461746560801b60648201526084016105e8565b6000828152
600f6020526040808220600301839055518291849133917f96c723fe83be55414610a7288b245abb669b6bea0f33b412df67ce5bbaa909ab91a45050565b6001600160a01b03811660009081526014602090815260408083206012835281842054815483518186028101860190945280845260609594929391928491908301828280156111e957602002820191906000526020600020905b8154815260200190600101908083116111d5575b5050505050915091509150915091565b60606112048261152e565b600061121b60408051602081019091526000815290565b9050600081511161123b5760405180602001604052806000815250611266565b8061124584611af6565b6040516020016112569291906124f8565b6040516020818303038152906040525b9392505050565b6006546001600160a01b031633148015906112985750336000908152600d602052604090205460ff16155b156112b6576040516355098f2760e01b815260040160405180910390fd5b6000848152600e602052604090205460ff16156112e657604051631ae7c5a760e31b815260040160405180910390fd5b6000848152600f602052604090208481556001016113048482612527565b506000848152600f60209081526040808320600281018690556003018490556008805460018082019092557ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee301889055600e9283905292819020805460ff191690931790925590516d10dbdd5c1bdb8810dc99585d195960921b81520160405180910390208360405161139791906125e1565b6040519081900381209086907f6f3b17c16f055f0938f0aaaa94437fb1f24d3e2f75d4186b015712a95ecf7daa90600090a450505050565b6060600880548060200260200160405190810160405280929190818152602001828054801561054357602002820191906000526020600020905b815481526020019060010190808311611409575050505050905090565b6001600160a01b03918216600090815260056020908152604080832093909416825291909152205460ff1690565b61145c611949565b6001600160a01b0381166114c15760405162461bcd60e51b815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201526564647265737360d01b60648201526084016105e8565b6114ca816119a3565b50565b6060600780548060200260200160405190810160405280929190818152602001828054801561054357602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311611507575050505050905090565b6000818152600260205260409020546001600160a01b03166114ca5760405162461bcd60e51b8152602060048201526018602482015277115490cdcc8c4e881a5b9d985b1a59081d1bdad95b88125160421b60448201526064016105e8565b600081815260046020526040902080546001600160a01b0319166001600160a01b03841690811790915581906115c282610c0d565b6001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92560405160405180910390a45050565b60008061160783610c0d565b9050806001600160a01b0316846001600160a01b0316148061162e575061162e8185611426565b806116525750836001600160a01b03166116478461054d565b6001600160a01b0316145b949350505050565b826001600160a01b031661166d82610c0d565b6001600160a01b0316146116935760405162461bcd60e51b81526004016105e8906125fd565b6001600160a01b0382166116f55760405162461bcd60e51b8152602060048201526024808201527f4552433732313a207472616e7366657220746f20746865207a65726f206164646044820152637265737360e01b60648201526084016105e8565b826001600160a01b031661170882610c0d565b6001600160a01b03161461172e5760405162461bcd60e51b81526004016105e8906125fd565b600081815260046020908152604080832080546001600160a01b03199081169091556001600160a01b0387811680865260038552838620805460001901905590871680865283862080546001019055868652600290945282852080549092168417909155905184937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef91a4505050565b6001600160a01b0382166118145760405162461bcd60e51b815260206004820181905260248201527f4552433732313a206d696e7420746f20746865207a65726f206164647265737360448201526064016105e8565b6000818152600260205260409020546001600160a01b031615611
8795760405162461bcd60e51b815260206004820152601c60248201527f4552433732313a20746f6b656e20616c7265616479206d696e7465640000000060448201526064016105e8565b6000818152600260205260409020546001600160a01b0316156118de5760405162461bcd60e51b815260206004820152601c60248201527f4552433732313a20746f6b656e20616c7265616479206d696e7465640000000060448201526064016105e8565b6001600160a01b038216600081815260036020908152604080832080546001019055848352600290915280822080546001600160a01b0319168417905551839291907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef908290a45050565b6006546001600160a01b03163314610dfe5760405162461bcd60e51b815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e657260448201526064016105e8565b600680546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b816001600160a01b0316836001600160a01b031603611a565760405162461bcd60e51b815260206004820152601960248201527f4552433732313a20617070726f766520746f2063616c6c65720000000000000060448201526064016105e8565b6001600160a01b03838116600081815260056020908152604080832094871680845294825291829020805460ff191686151590811790915591519182527f17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31910160405180910390a3505050565b611ace84848461165a565b611ada84848484611b89565b6110415760405162461bcd60e51b81526004016105e890612642565b60606000611b0383611c8a565b600101905060008167ffffffffffffffff811115611b2357611b23611fbe565b6040519080825280601f01601f191660200182016040528015611b4d576020820181803683370190505b5090508181016020015b600019016f181899199a1a9b1b9c1cb0b131b232b360811b600a86061a8153600a8504945084611b5757509392505050565b60006001600160a01b0384163b15611c7f57604051630a85bd0160e11b81526001600160a01b0385169063150b7a0290611bcd903390899088908890600401612694565b6020604051808303816000875af1925050508015611c08575060408051601f3d908101601f19168201909252611c05918101906126c7565b60015b611c65573d808015611c36576040519150601f19603f3d011682016040523d82523d6000602084013e611c3b565b606091505b508051600003611c5d5760405162461bcd60e51b81526004016105e890612642565b805181602001fd5b6001600160e01b031916630a85bd0160e11b149050611652565b506001949350505050565b60008072184f03e93ff9f4daa797ed6e38ed64bf6a1f0160401b8310611cc95772184f03e93ff9f4daa797ed6e38ed64bf6a1f0160401b830492506040015b6d04ee2d6d415b85acef81000000008310611cf5576d04ee2d6d415b85acef8100000000830492506020015b662386f26fc100008310611d1357662386f26fc10000830492506010015b6305f5e1008310611d2b576305f5e100830492506008015b6127108310611d3f57612710830492506004015b60648310611d51576064830492506002015b600a83106104b55760010192915050565b6001600160e01b0319811681146114ca57600080fd5b600060208284031215611d8a57600080fd5b813561126681611d62565b60005b83811015611db0578181015183820152602001611d98565b50506000910152565b60008151808452611dd1816020860160208601611d95565b601f01601f19169290920160200192915050565b6020815260006112666020830184611db9565b600060208284031215611e0a57600080fd5b5035919050565b80356001600160a01b0381168114610cb257600080fd5b60008060408385031215611e3b57600080fd5b611e4483611e11565b946020939093013593505050565b600080600060608486031215611e6757600080fd5b611e7084611e11565b9250611e7e60208501611e11565b9150604084013590509250925092565b600081518084526020808501945080840160005b83811015611ebe57815187529582019590820190600101611ea2565b509495945050505050565b6020815260006112666020830184611e8e565b60006020808301818452808551808352604092508286019150828160051b87010184880160005b83811015611f5957603f19898403018552815160808151855288820151
818a870152611f3182870182611db9565b838a0151878b0152606093840151939096019290925250509386019390860190600101611f03565b509098975050505050505050565b600060208284031215611f7957600080fd5b61126682611e11565b60008060408385031215611f9557600080fd5b611f9e83611e11565b915060208301358015158114611fb357600080fd5b809150509250929050565b634e487b7160e01b600052604160045260246000fd5b604051601f8201601f1916810167ffffffffffffffff81118282101715611ffd57611ffd611fbe565b604052919050565b6000806040838503121561201857600080fd5b8235915060208084013567ffffffffffffffff8082111561203857600080fd5b818601915086601f83011261204c57600080fd5b81358181111561205e5761205e611fbe565b8060051b915061206f848301611fd4565b818152918301840191848101908984111561208957600080fd5b938501935b838510156120ae5761209f85611e11565b8252938501939085019061208e565b8096505050505050509250929050565b8481528360208201528260408201526080606082015260006120e36080830184611db9565b9695505050505050565b600067ffffffffffffffff83111561210757612107611fbe565b61211a601f8401601f1916602001611fd4565b905082815283838301111561212e57600080fd5b828260208301376000602084830101529392505050565b6000806000806080858703121561215b57600080fd5b61216485611e11565b935061217260208601611e11565b925060408501359150606085013567ffffffffffffffff81111561219557600080fd5b8501601f810187136121a657600080fd5b6121b5878235602084016120ed565b91505092959194509250565b600080604083850312156121d457600080fd5b50508035926020909101359150565b6040815260006121f66040830185611e8e565b90508260208301529392505050565b6000806000806080858703121561221b57600080fd5b84359350602085013567ffffffffffffffff81111561223957600080fd5b8501601f8101871361224a57600080fd5b612259878235602084016120ed565b949794965050505060408301359260600135919050565b6000806040838503121561228357600080fd5b61228c83611e11565b915061229a60208401611e11565b90509250929050565b6020808252825182820181905260009190848201906040850190845b818110156122e45783516001600160a01b0316835292840192918401916001016122bf565b50909695505050505050565b600181811c9082168061230457607f821691505b602082108103610c0757634e487b7160e01b600052602260045260246000fd5b6020808252602d908201527f4552433732313a2063616c6c6572206973206e6f7420746f6b656e206f776e6560408201526c1c881bdc88185c1c1c9bdd9959609a1b606082015260800190565b634e487b7160e01b600052601160045260246000fd5b808201808211156104b5576104b5612371565b601f82111561068957600081815260208120601f850160051c810160208610156123c15750805b601f850160051c820191505b818110156123e0578281556001016123cd565b505050505050565b8181036123f3575050565b6123fd82546122f0565b67ffffffffffffffff81111561241557612415611fbe565b6124298161242384546122f0565b8461239a565b6000601f82116001811461245d57600083156124455750848201545b600019600385901b1c1916600184901b1784556124c2565b600085815260209020601f19841690600086815260209020845b838110156124975782860154825560019586019590910190602001612477565b50858310156124b55781850154600019600388901b60f8161c191681555b50505060018360011b0184555b5050505050565b634e487b7160e01b600052603260045260246000fd5b6000600182016124f1576124f1612371565b5060010190565b6000835161250a818460208801611d95565b83519083019061251e818360208801611d95565b01949350505050565b815167ffffffffffffffff81111561254157612541611fbe565b61254f8161242384546122f0565b602080601f831160018114612584576000841561256c5750858301515b600019600386901b1c1916600185901b1785556123e0565b600085815260208120601f198616915b828110156125b357888601518255948401946001909101908401612594565b50858210156125d15787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b600082516125f3818460208701611d95565b9190910192915050565b60208082526025908201527f4552433732313a207
472616e736665722066726f6d20696e636f72726563742060408201526437bbb732b960d91b606082015260800190565b60208082526032908201527f4552433732313a207472616e7366657220746f206e6f6e20455243373231526560408201527131b2b4bb32b91034b6b83632b6b2b73a32b960711b606082015260800190565b6001600160a01b03858116825284166020820152604081018390526080606082018190526000906120e390830184611db9565b6000602082840312156126d957600080fd5b815161126681611d6256fea2646970667358221220a32fe3590864a4a4659a172f5e4205c6ad535c4ab2f8fd140b666e1ec101ce4264736f6c6343000811003300000000000000000000000018b169b811af233cbea0c44f03d0b1e42e8f2db8" + } + }, + "version": "0.8.11+commit.d7f03943.Darwin.appleclang" +} \ No newline at end of file diff --git a/test/data/contracts/reverter/reverter.json b/test/data/contracts/reverter/reverter.json new file mode 100644 index 0000000000..7084eb7510 --- /dev/null +++ b/test/data/contracts/reverter/reverter.json @@ -0,0 +1 @@ +{"contracts":{"reverter.sol:Reverter":{"abi":[{"inputs":[],"name":"goBang","outputs":[],"stateMutability":"pure","type":"function"}],"bin":"608060405234801561001057600080fd5b5061011b806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063060846fb14602d575b600080fd5b60336035565b005b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040160659060c7565b60405180910390fd5b600082825260208201905092915050565b7f42616e6721000000000000000000000000000000000000000000000000000000600082015250565b600060b3600583606e565b915060bc82607f565b602082019050919050565b6000602082019050818103600083015260de8160a8565b905091905056fea26469706673582212204c5a121fa1ad563532a26d368380482d34f0eee629e860671f518ec7af2fc2c064736f6c63430008170033"}},"version":"0.8.23+commit.f704f362.Darwin.appleclang"} diff --git a/test/data/contracts/reverter/reverter.sol b/test/data/contracts/reverter/reverter.sol new file mode 100644 index 0000000000..4628d5efba --- /dev/null +++ b/test/data/contracts/reverter/reverter.sol @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: Apache-2.0 + +pragma solidity >=0.6.0 <0.9.0; + +contract Reverter { + function goBang() pure public { + revert("Bang!"); + } +} \ No newline at end of file diff --git a/test/e2e/client/restclient.go b/test/e2e/client/restclient.go index 1615edc402..abd5c368a0 100644 --- a/test/e2e/client/restclient.go +++ b/test/e2e/client/restclient.go @@ -549,12 +549,13 @@ func (client *FireFlyClient) CreateDatatype(t *testing.T, datatype *core.Datatyp return &dtReturn } -func (client *FireFlyClient) CreateTokenPool(t *testing.T, pool *core.TokenPool, confirm bool) *core.TokenPool { +func (client *FireFlyClient) CreateTokenPool(t *testing.T, pool *core.TokenPool, publish, confirm bool) *core.TokenPool { var poolOut core.TokenPool path := client.namespaced(urlTokenPools) resp, err := client.Client.R(). SetBody(pool). SetQueryParam("confirm", strconv.FormatBool(confirm)). + SetQueryParam("publish", strconv.FormatBool(publish)). SetResult(&poolOut). Post(path) require.NoError(t, err) @@ -566,6 +567,22 @@ func (client *FireFlyClient) CreateTokenPool(t *testing.T, pool *core.TokenPool, return &poolOut } +func (client *FireFlyClient) PublishTokenPool(t *testing.T, poolID *fftypes.UUID, networkName string, confirm bool) { + path := client.namespaced(urlTokenPools + "/" + poolID.String() + "/publish") + resp, err := client.Client.R(). + SetBody(&core.DefinitionPublish{ + NetworkName: networkName, + }). + SetQueryParam("confirm", strconv.FormatBool(confirm)). 
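+ // A confirmed publish returns 200 synchronously; otherwise the request is accepted with 202 and completes asynchronously (matching the expected status check below).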
+ Post(path) + require.NoError(t, err) + expected := 202 + if confirm { + expected = 200 + } + require.Equal(t, expected, resp.StatusCode(), "POST %s [%d]: %s", path, resp.StatusCode(), resp.String()) +} + func (client *FireFlyClient) GetTokenPools(t *testing.T, startTime time.Time) (pools []*core.TokenPool) { path := client.namespaced(urlTokenPools) resp, err := client.Client.R(). @@ -587,6 +604,13 @@ func (client *FireFlyClient) GetTokenPool(t *testing.T, poolID *fftypes.UUID) (p return pool } +func (client *FireFlyClient) DeleteTokenPool(t *testing.T, poolID *fftypes.UUID, expectedStatus int) { + path := client.namespaced(urlTokenPools + "/" + poolID.String()) + resp, err := client.Client.R().Delete(path) + require.NoError(t, err) + require.Equal(t, expectedStatus, resp.StatusCode(), "DELETE %s [%d]: %s", path, resp.StatusCode(), resp.String()) +} + func (client *FireFlyClient) MintTokens(t *testing.T, mint *core.TokenTransferInput, confirm bool, expectedStatus ...int) *core.TokenTransfer { var transferOut core.TokenTransfer path := client.namespaced(urlTokenMint) @@ -800,15 +824,20 @@ func (client *FireFlyClient) DeleteContractListener(t *testing.T, id *fftypes.UU func (client *FireFlyClient) InvokeContractMethod(t *testing.T, req *core.ContractCallRequest, expectedStatus ...int) (interface{}, error) { var res interface{} path := client.namespaced(urlContractInvoke) + var errResult fftypes.RESTError resp, err := client.Client.R(). SetBody(req). SetResult(&res). + SetError(&errResult). Post(path) require.NoError(t, err) if len(expectedStatus) == 0 { expectedStatus = []int{202} } require.Equal(t, expectedStatus[0], resp.StatusCode(), "POST %s [%d]: %s", path, resp.StatusCode(), resp.String()) + if err == nil && errResult.Error != "" { + return res, fmt.Errorf(errResult.Error) + } return res, err } @@ -836,36 +865,104 @@ func (client *FireFlyClient) GenerateFFIFromABI(t *testing.T, req *fftypes.FFIGe return &res } -func (client *FireFlyClient) CreateFFI(t *testing.T, ffi *fftypes.FFI) (*fftypes.FFI, error) { +func (client *FireFlyClient) CreateFFI(t *testing.T, ffi *fftypes.FFI, publish bool) (*fftypes.FFI, error) { var res fftypes.FFI path := client.namespaced(urlContractInterface) resp, err := client.Client.R(). SetBody(ffi). SetResult(&res). SetQueryParam("confirm", "true"). + SetQueryParam("publish", strconv.FormatBool(publish)). Post(path) require.NoError(t, err) require.Equal(t, 200, resp.StatusCode(), "POST %s [%d]: %s", path, resp.StatusCode(), resp.String()) return &res, err } -func (client *FireFlyClient) CreateContractAPI(t *testing.T, name string, ffiReference *fftypes.FFIReference, location *fftypes.JSONAny) (interface{}, error) { +func (client *FireFlyClient) GetFFI(t *testing.T, name, version string) (result *fftypes.FFI) { + path := client.namespaced(urlContractInterface + "/" + name + "/" + version) + resp, err := client.Client.R(). + SetResult(&result). + Get(path) + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode(), "GET %s [%d]: %s", path, resp.StatusCode(), resp.String()) + return result +} + +func (client *FireFlyClient) PublishFFI(t *testing.T, name, version, networkName string, confirm bool) { + path := client.namespaced(urlContractInterface + "/" + name + "/" + version + "/publish") + resp, err := client.Client.R(). + SetBody(&core.DefinitionPublish{ + NetworkName: networkName, + }). + SetQueryParam("confirm", strconv.FormatBool(confirm)). 
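+ // The DefinitionPublish body carries the network name under which the interface definition is shared once published.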
+ Post(path) + require.NoError(t, err) + expected := 202 + if confirm { + expected = 200 + } + require.Equal(t, expected, resp.StatusCode(), "POST %s [%d]: %s", path, resp.StatusCode(), resp.String()) +} + +func (client *FireFlyClient) DeleteFFI(t *testing.T, id *fftypes.UUID, expectedStatus int) { + path := client.namespaced(urlContractInterface + "/" + id.String()) + resp, err := client.Client.R().Delete(path) + require.NoError(t, err) + require.Equal(t, expectedStatus, resp.StatusCode(), "DELETE %s [%d]: %s", path, resp.StatusCode(), resp.String()) +} + +func (client *FireFlyClient) CreateContractAPI(t *testing.T, name string, ffiReference *fftypes.FFIReference, location *fftypes.JSONAny, publish bool) (*core.ContractAPI, error) { apiReqBody := &core.ContractAPI{ Name: name, Interface: ffiReference, Location: location, } - var res interface{} + var res core.ContractAPI path := client.namespaced(urlContractAPI) resp, err := client.Client.R(). SetBody(apiReqBody). SetResult(&res). SetQueryParam("confirm", "true"). + SetQueryParam("publish", strconv.FormatBool(publish)). Post(path) require.NoError(t, err) require.Equal(t, 200, resp.StatusCode(), "POST %s [%d]: %s", path, resp.StatusCode(), resp.String()) - return res, err + return &res, err +} + +func (client *FireFlyClient) GetContractAPI(t *testing.T, name string) (result *core.ContractAPI) { + path := client.namespaced(urlContractAPI + "/" + name) + resp, err := client.Client.R(). + SetResult(&result). + Get(path) + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode(), "GET %s [%d]: %s", path, resp.StatusCode(), resp.String()) + return result +} + +func (client *FireFlyClient) PublishContractAPI(t *testing.T, name, networkName string, confirm bool) { + path := client.namespaced(urlContractAPI + "/" + name + "/publish") + resp, err := client.Client.R(). + SetBody(&core.DefinitionPublish{ + NetworkName: networkName, + }). + SetQueryParam("confirm", strconv.FormatBool(confirm)). + Post(path) + require.NoError(t, err) + expected := 202 + if confirm { + expected = 200 + } + require.Equal(t, expected, resp.StatusCode(), "POST %s [%d]: %s", path, resp.StatusCode(), resp.String()) +} + +func (client *FireFlyClient) DeleteContractAPI(t *testing.T, name string, expectedStatus int) { + path := client.namespaced(urlContractAPI + "/" + name) + resp, err := client.Client.R().Delete(path) + require.NoError(t, err) + require.Equal(t, expectedStatus, resp.StatusCode(), "DELETE %s [%d]: %s", path, resp.StatusCode(), resp.String()) } func (client *FireFlyClient) InvokeContractAPIMethod(t *testing.T, apiName string, methodName string, input *fftypes.JSONAny) (interface{}, error) { diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 04c45221d5..c96928aefd 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -256,3 +256,17 @@ func VerifyAllOperationsSucceeded(t *testing.T, clients []*client.FireFlyClient, assert.Fail(t, pending) } + +func VerifyOperationsAlreadyMarkedFailed(t *testing.T, clients []*client.FireFlyClient, startTime time.Time) { + // Note we do NOT wait in this function - use this function when failure should already have been recorded, + // and the work has been done in FF Core to ensure that is reflected in the operation cache. 
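+ // A single pass is made over the operations: any operation still in a status other than Failed is collected into 'pending' and reported as an assertion failure.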
+ pending := "" + for _, client := range clients { + for _, op := range client.GetOperations(t, startTime) { + if op.Status != core.OpStatusFailed { + pending += fmt.Sprintf("Operation '%s' (%s) on '%s' status=%s\n", op.ID, op.Type, client.Client.BaseURL, op.Status) + } + } + } + assert.Empty(t, pending, pending) +} diff --git a/test/e2e/gateway/ethereum_coupon.go b/test/e2e/gateway/ethereum_coupon.go new file mode 100644 index 0000000000..e15fea2612 --- /dev/null +++ b/test/e2e/gateway/ethereum_coupon.go @@ -0,0 +1,306 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gateway + +import ( + "encoding/json" + "fmt" + "os/exec" + "testing" + + "github.com/aidarkhanov/nanoid" + "github.com/go-resty/resty/v2" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/pkg/core" + "github.com/hyperledger/firefly/test/e2e" + "github.com/hyperledger/firefly/test/e2e/client" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +var couponContractVersion, _ = nanoid.Generate("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", nanoid.DefaultSize) + +func couponFFICreated() *fftypes.FFIEvent { + return &fftypes.FFIEvent{ + FFIEventDefinition: fftypes.FFIEventDefinition{ + Name: "CouponCreated", + Params: fftypes.FFIParams{ + { + Name: "_tokenId", + Schema: fftypes.JSONAnyPtr(`{ + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "details": { + "type": "uint256", + "internalType": "uint256", + "indexed": true + }, + "description": "An integer. You are recommended to use a JSON string. A JSON number can be used for values up to the safe maximum." + }`), + }, + { + Name: "_ifpsURL", + Schema: fftypes.JSONAnyPtr(`{ + "type": "string", + "details": { + "type": "string", + "internalType": "string", + "indexed": true + } + }`), + }, + { + Name: "_status", + Schema: fftypes.JSONAnyPtr(`{ + "type": "string", + "details": { + "type": "string", + "internalType": "string", + "indexed": true + } + }`), + }, + }, + }, + } +} + +func couponFFI() *fftypes.FFI { + return &fftypes.FFI{ + Name: "SimpleStorage", + Version: couponContractVersion, + Methods: []*fftypes.FFIMethod{ + couponFFICreateCoupon(), + couponFFIGetAllCouponIDs(), + }, + Events: []*fftypes.FFIEvent{ + couponFFICreated(), + }, + } +} + +func couponFFICreateCoupon() *fftypes.FFIMethod { + return &fftypes.FFIMethod{ + Name: "createCoupon", + Params: fftypes.FFIParams{ + { + Name: "_id", + Schema: fftypes.JSONAnyPtr(`{ + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "details": { + "type": "uint256", + "internalType": "uint256" + }, + "description": "An integer. You are recommended to use a JSON string. A JSON number can be used for values up to the safe maximum." 
+ }`), + }, + { + Name: "_ipfsUrl", + Schema: fftypes.JSONAnyPtr(`{ + "type": "string", + "details": { + "type": "string", + "internalType": "string" + } + }`), + }, + { + Name: "_start", + Schema: fftypes.JSONAnyPtr(`{ + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "details": { + "type": "uint256", + "internalType": "uint256" + }, + "description": "An integer. You are recommended to use a JSON string. A JSON number can be used for values up to the safe maximum." + }`), + }, + { + Name: "_end", + Schema: fftypes.JSONAnyPtr(`{ + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "details": { + "type": "uint256", + "internalType": "uint256" + }, + "description": "An integer. You are recommended to use a JSON string. A JSON number can be used for values up to the safe maximum." + }`), + }, + }, + Returns: fftypes.FFIParams{}, + } +} + +func couponFFIGetAllCouponIDs() *fftypes.FFIMethod { + return &fftypes.FFIMethod{ + Name: "allCouponIds", + Params: fftypes.FFIParams{}, + Returns: fftypes.FFIParams{ + { + Name: "allCreatedIds", + Schema: fftypes.JSONAnyPtr(`{ + "type": "array", + "details": { + "type": "uint256[]", + "internalType": "uint256[]" + }, + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "description": "An integer. You are recommended to use a JSON string. A JSON number can be used for values up to the safe maximum." + } + }`), + }, + }, + } +} + +func deployCouponContract(t *testing.T, stackName, contract string, address string) string { + path := "../../data/contracts/" + contract + out, err := exec.Command("ff", "deploy", "ethereum", stackName, path, address).Output() + require.NoError(t, err) + var output map[string]interface{} + err = json.Unmarshal(out, &output) + require.NoError(t, err) + contractAddress := output["address"].(string) + t.Logf("Contract address: %s", address) + return contractAddress +} + +type EthereumCouponTestSuite struct { + suite.Suite + testState *testState + contractAddress string + interfaceID *fftypes.UUID + ethClient *resty.Client + ethIdentity string +} + +func (suite *EthereumCouponTestSuite) SetupSuite() { + suite.testState = beforeE2ETest(suite.T()) + stack := e2e.ReadStack(suite.T()) + stackState := e2e.ReadStackState(suite.T()) + suite.ethClient = client.NewResty(suite.T()) + suite.ethClient.SetBaseURL(fmt.Sprintf("http://localhost:%d", stack.Members[0].ExposedConnectorPort)) + account := stackState.Accounts[0].(map[string]interface{}) + suite.ethIdentity = account["address"].(string) + suite.contractAddress = deployCouponContract(suite.T(), stack.Name, "coupon/coupon.json", suite.ethIdentity) + + res, err := suite.testState.client1.CreateFFI(suite.T(), couponFFI(), false) + suite.interfaceID = res.ID + suite.T().Logf("interfaceID: %s", suite.interfaceID) + assert.NoError(suite.T(), err) +} + +func (suite *EthereumCouponTestSuite) BeforeTest(suiteName, testName string) { + suite.testState = beforeE2ETest(suite.T()) +} + +func (suite *EthereumCouponTestSuite) AfterTest(suiteName, testName string) { + e2e.VerifyAllOperationsSucceeded(suite.T(), []*client.FireFlyClient{suite.testState.client1}, suite.testState.startTime) +} + +func (suite *EthereumCouponTestSuite) TestDirectInvokeMethod() { + defer suite.testState.Done() + + received1 := e2e.WsReader(suite.testState.ws1) + listener := suite.testState.client1.CreateContractListener(suite.T(), couponFFICreated(), &fftypes.JSONObject{ + "address": suite.contractAddress, + }) + + listeners := 
suite.testState.client1.GetContractListeners(suite.T(), suite.testState.startTime) + assert.Equal(suite.T(), 1, len(listeners)) + assert.Equal(suite.T(), listener.BackendID, listeners[0].BackendID) + + location := map[string]interface{}{ + "address": suite.contractAddress, + } + locationBytes, _ := json.Marshal(location) + invokeContractRequest := &core.ContractCallRequest{ + Location: fftypes.JSONAnyPtrBytes(locationBytes), + Method: couponFFICreateCoupon(), + Input: map[string]interface{}{ + "_id": "1", + "_ipfsUrl": "https://ipfs.io/ipfs/Qmc5gCcjYypU7y28oCALwfSvxCBskLuPKWpK4qpterKC7z", + "_start": "2", + "_end": "3", + }, + } + + res, err := suite.testState.client1.InvokeContractMethod(suite.T(), invokeContractRequest) + assert.NoError(suite.T(), err) + assert.NotNil(suite.T(), res) + + match := map[string]interface{}{ + "info": map[string]interface{}{ + "address": suite.contractAddress, + }, + "output": map[string]interface{}{ + "_tokenId": "1", + "_ifpsURL": "0xaa430c2f4b1a970b28fcf799f16f539663ad0148aa133f88d622d51f36877f63", + }, + "listener": listener.ID.String(), + } + + event := e2e.WaitForContractEvent(suite.T(), suite.testState.client1, received1, match) + assert.NotNil(suite.T(), event) + + queryContractRequest := &core.ContractCallRequest{ + Location: fftypes.JSONAnyPtrBytes(locationBytes), + Method: couponFFIGetAllCouponIDs(), + } + res, err = suite.testState.client1.QueryContractMethod(suite.T(), queryContractRequest) + assert.NoError(suite.T(), err) + resJSON, err := json.Marshal(res) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), `{"allCreatedIds":["1"]}`, string(resJSON)) + suite.testState.client1.DeleteContractListener(suite.T(), listener.ID) +} diff --git a/test/e2e/gateway/ethereum_revert.go b/test/e2e/gateway/ethereum_revert.go new file mode 100644 index 0000000000..f5f72034bb --- /dev/null +++ b/test/e2e/gateway/ethereum_revert.go @@ -0,0 +1,97 @@ +// Copyright © 2023 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
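+ +// EthereumRevertTestSuite deploys the Reverter contract above and checks that its always-reverting method surfaces the "Bang!" revert reason through the invoke API as an FF10111 error, with the operation immediately marked Failed.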
+ +package gateway + +import ( + "encoding/json" + "fmt" + + "github.com/go-resty/resty/v2" + "github.com/hyperledger/firefly-common/pkg/fftypes" + "github.com/hyperledger/firefly/pkg/core" + "github.com/hyperledger/firefly/test/e2e" + "github.com/hyperledger/firefly/test/e2e/client" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +type EthereumRevertTestSuite struct { + suite.Suite + testState *testState + contractAddress string + ethClient *resty.Client + ethIdentity string + abi *fftypes.JSONAny +} + +func (suite *EthereumRevertTestSuite) SetupSuite() { + suite.testState = beforeE2ETest(suite.T()) + stack := e2e.ReadStack(suite.T()) + stackState := e2e.ReadStackState(suite.T()) + suite.ethClient = client.NewResty(suite.T()) + suite.ethClient.SetBaseURL(fmt.Sprintf("http://localhost:%d", stack.Members[0].ExposedConnectorPort)) + account := stackState.Accounts[0].(map[string]interface{}) + suite.ethIdentity = account["address"].(string) + suite.abi, suite.contractAddress = deployTestContractFromCompiledJSON(suite.T(), stack.Name, "reverter/reverter.json") +} + +func (suite *EthereumRevertTestSuite) BeforeTest(suiteName, testName string) { + suite.testState = beforeE2ETest(suite.T()) +} + +func (suite *EthereumRevertTestSuite) AfterTest(suiteName, testName string) { + // Important part of the test - the status of the operation must go to Failed - immediately. + // We should not encounter an "Initialized" status + e2e.VerifyOperationsAlreadyMarkedFailed(suite.T(), []*client.FireFlyClient{suite.testState.client1}, suite.testState.startTime) +} + +func (suite *EthereumRevertTestSuite) TestRevertTransitionsToFailed() { + defer suite.testState.Done() + + type generateInput struct { + ABI *fftypes.JSONAny `json:"abi"` + } + inputBytes, err := json.Marshal(&generateInput{ABI: suite.abi}) + assert.NoError(suite.T(), err) + + ffi := suite.testState.client1.GenerateFFIFromABI(suite.T(), &fftypes.FFIGenerationRequest{ + Input: fftypes.JSONAnyPtrBytes(inputBytes), + }) + assert.NoError(suite.T(), err) + var goBang *fftypes.FFIMethod + for _, m := range ffi.Methods { + if m.Name == "goBang" { + goBang = m + } + } + assert.NotNil(suite.T(), goBang) + + location := map[string]interface{}{ + "address": suite.contractAddress, + } + locationBytes, _ := json.Marshal(location) + invokeContractRequest := &core.ContractCallRequest{ + IdempotencyKey: core.IdempotencyKey(fftypes.NewUUID().String()), + Location: fftypes.JSONAnyPtrBytes(locationBytes), + Method: goBang, + Input: map[string]interface{}{}, + } + + // Check we get the revert error all the way back through the API on the invoke, due to the gas estimation + _, err = suite.testState.client1.InvokeContractMethod(suite.T(), invokeContractRequest, 500) + assert.Regexp(suite.T(), "FF10111.*Bang!", err) +} diff --git a/test/e2e/gateway/ethereum_contracts.go b/test/e2e/gateway/ethereum_simplestorage.go similarity index 85% rename from test/e2e/gateway/ethereum_contracts.go rename to test/e2e/gateway/ethereum_simplestorage.go index 4b050434d3..f691f4aba4 100644 --- a/test/e2e/gateway/ethereum_contracts.go +++ b/test/e2e/gateway/ethereum_simplestorage.go @@ -19,6 +19,7 @@ package gateway import ( "encoding/json" "fmt" + "os" "os/exec" "testing" @@ -33,7 +34,7 @@ import ( "github.com/stretchr/testify/suite" ) -var contractVersion, _ = nanoid.Generate("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", nanoid.DefaultSize) +var simpleStorageContractVersion, _ = 
nanoid.Generate("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", nanoid.DefaultSize) func simpleStorageFFIChanged() *fftypes.FFIEvent { return &fftypes.FFIEvent{ @@ -56,7 +57,7 @@ func simpleStorageFFIChanged() *fftypes.FFIEvent { func simpleStorageFFI() *fftypes.FFI { return &fftypes.FFI{ Name: "SimpleStorage", - Version: contractVersion, + Version: simpleStorageContractVersion, Methods: []*fftypes.FFIMethod{ simpleStorageFFISet(), simpleStorageFFIGet(), @@ -93,19 +94,43 @@ func simpleStorageFFIGet() *fftypes.FFIMethod { } } -func deployContract(t *testing.T, stackName, contract string) string { +func deployTestContractFromCompiledJSON(t *testing.T, stackName, contract string) (*fftypes.JSONAny, string) { path := "../../data/contracts/" + contract out, err := exec.Command("ff", "deploy", "ethereum", stackName, path).Output() - require.NoError(t, err) + var stderr []byte + if err != nil { + stderr = err.(*exec.ExitError).Stderr + } + require.NoError(t, err, fmt.Sprintf("ff deploy failed: %s", stderr)) var output map[string]interface{} err = json.Unmarshal(out, &output) require.NoError(t, err) address := output["address"].(string) t.Logf("Contract address: %s", address) - return address + + type solcJSON struct { + Contracts map[string]struct { + ABI *fftypes.JSONAny `json:"abi"` + } `json:"contracts"` + } + b, err := os.ReadFile(path) + assert.NoError(t, err) + var contractJSON solcJSON + err = json.Unmarshal(b, &contractJSON) + assert.NoError(t, err) + + var abiBytes *fftypes.JSONAny + for _, contract := range contractJSON.Contracts { + abiBytes = contract.ABI + if abiBytes != nil { + break + } + } + assert.NotNil(t, abiBytes) + return abiBytes, address } -type EthereumContractTestSuite struct { +type EthereumSimpleStorageTestSuite struct { suite.Suite testState *testState contractAddress string @@ -114,7 +139,7 @@ type EthereumContractTestSuite struct { ethIdentity string } -func (suite *EthereumContractTestSuite) SetupSuite() { +func (suite *EthereumSimpleStorageTestSuite) SetupSuite() { suite.testState = beforeE2ETest(suite.T()) stack := e2e.ReadStack(suite.T()) stackState := e2e.ReadStackState(suite.T()) @@ -122,23 +147,23 @@ func (suite *EthereumContractTestSuite) SetupSuite() { suite.ethClient.SetBaseURL(fmt.Sprintf("http://localhost:%d", stack.Members[0].ExposedConnectorPort)) account := stackState.Accounts[0].(map[string]interface{}) suite.ethIdentity = account["address"].(string) - suite.contractAddress = deployContract(suite.T(), stack.Name, "simplestorage/simple_storage.json") + _, suite.contractAddress = deployTestContractFromCompiledJSON(suite.T(), stack.Name, "simplestorage/simple_storage.json") - res, err := suite.testState.client1.CreateFFI(suite.T(), simpleStorageFFI()) + res, err := suite.testState.client1.CreateFFI(suite.T(), simpleStorageFFI(), false) suite.interfaceID = res.ID suite.T().Logf("interfaceID: %s", suite.interfaceID) assert.NoError(suite.T(), err) } -func (suite *EthereumContractTestSuite) BeforeTest(suiteName, testName string) { +func (suite *EthereumSimpleStorageTestSuite) BeforeTest(suiteName, testName string) { suite.testState = beforeE2ETest(suite.T()) } -func (suite *EthereumContractTestSuite) AfterTest(suiteName, testName string) { +func (suite *EthereumSimpleStorageTestSuite) AfterTest(suiteName, testName string) { e2e.VerifyAllOperationsSucceeded(suite.T(), []*client.FireFlyClient{suite.testState.client1}, suite.testState.startTime) } -func (suite *EthereumContractTestSuite) TestDirectInvokeMethod() { +func (suite 
*EthereumSimpleStorageTestSuite) TestDirectInvokeMethod() { defer suite.testState.Done() received1 := e2e.WsReader(suite.testState.ws1) @@ -192,7 +217,7 @@ func (suite *EthereumContractTestSuite) TestDirectInvokeMethod() { suite.testState.client1.DeleteContractListener(suite.T(), listener.ID) } -func (suite *EthereumContractTestSuite) TestFFIInvokeMethod() { +func (suite *EthereumSimpleStorageTestSuite) TestFFIInvokeMethod() { defer suite.testState.Done() received1 := e2e.WsReader(suite.testState.ws1) @@ -251,7 +276,7 @@ func (suite *EthereumContractTestSuite) TestFFIInvokeMethod() { suite.testState.client1.DeleteContractListener(suite.T(), listener.ID) } -func (suite *EthereumContractTestSuite) TestContractAPIMethod() { +func (suite *EthereumSimpleStorageTestSuite) TestContractAPIMethod() { defer suite.testState.Done() received1 := e2e.WsReader(suite.testState.ws1) @@ -266,7 +291,7 @@ func (suite *EthereumContractTestSuite) TestContractAPIMethod() { } locationBytes, _ := json.Marshal(location) - createContractAPIResult, err := suite.testState.client1.CreateContractAPI(suite.T(), APIName, ffiReference, fftypes.JSONAnyPtr(string(locationBytes))) + createContractAPIResult, err := suite.testState.client1.CreateContractAPI(suite.T(), APIName, ffiReference, fftypes.JSONAnyPtr(string(locationBytes)), false) assert.NotNil(suite.T(), createContractAPIResult) assert.NoError(suite.T(), err) diff --git a/test/e2e/gateway/tokens.go b/test/e2e/gateway/tokens.go index 8e67a2711e..d8673d95fe 100644 --- a/test/e2e/gateway/tokens.go +++ b/test/e2e/gateway/tokens.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -65,7 +65,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { Config: fftypes.JSONObject{}, } - poolResp := suite.testState.client1.CreateTokenPool(suite.T(), pool, false) + poolResp := suite.testState.client1.CreateTokenPool(suite.T(), pool, false, false) poolID := poolResp.ID e2e.WaitForEvent(suite.T(), received1, core.EventTypePoolConfirmed, poolID) @@ -96,4 +96,8 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { e2e.ValidateAccountBalances(suite.T(), suite.testState.client1, poolID, "", map[string]int64{ suite.key: 0, }) + + suite.testState.client1.DeleteTokenPool(suite.T(), poolID, 204) + pools = suite.testState.client1.GetTokenPools(suite.T(), suite.testState.startTime) + assert.Equal(suite.T(), 0, len(pools)) } diff --git a/test/e2e/gateway/tokens_only.go b/test/e2e/gateway/tokens_only.go index 7fee4f1596..0e8c54af58 100644 --- a/test/e2e/gateway/tokens_only.go +++ b/test/e2e/gateway/tokens_only.go @@ -96,7 +96,7 @@ func (suite *TokensOnlyTestSuite) TestTokensOnlyNamespaces() { Key: suite.key, Type: core.TokenTypeFungible, } - poolResp := client1.CreateTokenPool(suite.T(), pool, false) + poolResp := client1.CreateTokenPool(suite.T(), pool, false, false) poolID := poolResp.ID e2e.WaitForEvent(suite.T(), received1, core.EventTypePoolConfirmed, poolID) diff --git a/test/e2e/multiparty/common.go b/test/e2e/multiparty/common.go index 5d1e6ffd4d..41b3b667ac 100644 --- a/test/e2e/multiparty/common.go +++ b/test/e2e/multiparty/common.go @@ -1,4 +1,4 @@ -// Copyright © 2022 Kaleido, Inc. +// Copyright © 2023 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -181,7 +181,7 @@ func beforeE2ETest(t *testing.T) *testState { t.Logf("Org1: ID=%s DID=%s Key=%s", ts.org1.ID, ts.org1.DID, ts.org1key.Value) t.Logf("Org2: ID=%s DID=%s Key=%s", ts.org2.ID, ts.org2.DID, ts.org2key.Value) - eventNames := "message_confirmed|token_pool_confirmed|token_transfer_confirmed|blockchain_event_received|token_approval_confirmed|identity_confirmed|message_rejected" + eventNames := "message_confirmed|token_pool_confirmed|token_transfer_confirmed|blockchain_event_received|token_approval_confirmed|identity_confirmed|message_rejected|contract_interface_confirmed|contract_api_confirmed" queryString := fmt.Sprintf("namespace=%s&ephemeral&autoack&filter.events=%s&changeevents=.*", ts.namespace, eventNames) ts.ws1 = ts.client1.WebSocket(t, queryString, authHeader1) ts.ws2 = ts.client2.WebSocket(t, queryString, authHeader2) diff --git a/test/e2e/multiparty/ethereum_contract_message.go b/test/e2e/multiparty/ethereum_contract_message.go index d8c0e56aba..0b442a0dc6 100644 --- a/test/e2e/multiparty/ethereum_contract_message.go +++ b/test/e2e/multiparty/ethereum_contract_message.go @@ -54,10 +54,10 @@ func (suite *EthereumContractWithMessageTestSuite) TestCustomContractWithMessage ffi := suite.testState.client1.GenerateFFIFromABI(suite.T(), &fftypes.FFIGenerationRequest{ Name: "CustomPin", - Version: contractVersion, + Version: contractVersion(), Input: fftypes.JSONAnyPtr(`{"abi":` + suite.contractJSON.GetObjectArray("abi").String() + `}`), }) - iface, err := suite.testState.client1.CreateFFI(suite.T(), ffi) + iface, err := suite.testState.client1.CreateFFI(suite.T(), ffi, true) assert.NoError(suite.T(), err) status, _, err := suite.testState.client1.GetStatus() diff --git a/test/e2e/multiparty/ethereum_contracts.go b/test/e2e/multiparty/ethereum_contracts.go index 253dadffd8..06a605395d 100644 --- a/test/e2e/multiparty/ethereum_contracts.go +++ b/test/e2e/multiparty/ethereum_contracts.go @@ -35,7 +35,11 @@ import ( "github.com/stretchr/testify/suite" ) -var contractVersion, _ = nanoid.Generate("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", nanoid.DefaultSize) +func contractVersion() string { + versionAlphabet := "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + version, _ := nanoid.Generate(versionAlphabet, nanoid.DefaultSize) + return version +} func simpleStorageFFIChanged() *fftypes.FFIEvent { return &fftypes.FFIEvent{ @@ -55,10 +59,10 @@ func simpleStorageFFIChanged() *fftypes.FFIEvent { } } -func simpleStorageFFI() *fftypes.FFI { +func simpleStorageFFI(version string) *fftypes.FFI { return &fftypes.FFI{ Name: "SimpleStorage", - Version: contractVersion, + Version: version, Methods: []*fftypes.FFIMethod{ simpleStorageFFISet(), simpleStorageFFIGet(), @@ -124,7 +128,7 @@ func (suite *EthereumContractTestSuite) SetupSuite() { suite.ethIdentity = suite.testState.org1key.Value suite.contractAddress = deployContract(suite.T(), stack.Name, "simplestorage/simple_storage.json") - res, err := suite.testState.client1.CreateFFI(suite.T(), simpleStorageFFI()) + res, err := suite.testState.client1.CreateFFI(suite.T(), simpleStorageFFI(contractVersion()), true) suite.interfaceID = res.ID suite.T().Logf("interfaceID: %s", suite.interfaceID) assert.NoError(suite.T(), err) @@ -229,7 +233,7 @@ func (suite *EthereumContractTestSuite) TestFFIInvokeMethod() { // Idempotency check _, err = suite.testState.client1.InvokeContractMethod(suite.T(), invokeContractRequest, 409) - assert.NoError(suite.T(), err) + 
assert.Regexp(suite.T(), "FF10431|FF10458" /* idempotency check could come from FF or blockchain connector, depending on the operation update that is async */, err) match := map[string]interface{}{ "info": map[string]interface{}{ @@ -272,7 +276,7 @@ func (suite *EthereumContractTestSuite) TestContractAPIMethod() { } locationBytes, _ := json.Marshal(location) - createContractAPIResult, err := suite.testState.client1.CreateContractAPI(suite.T(), APIName, ffiReference, fftypes.JSONAnyPtr(string(locationBytes))) + createContractAPIResult, err := suite.testState.client1.CreateContractAPI(suite.T(), APIName, ffiReference, fftypes.JSONAnyPtr(string(locationBytes)), true) assert.NotNil(suite.T(), createContractAPIResult) assert.NoError(suite.T(), err) @@ -323,3 +327,70 @@ func readContractJSON(t *testing.T, contract string) fftypes.JSONObject { assert.NoError(t, err) return jsonValue } + +func (suite *EthereumContractTestSuite) TestContractPublish() { + received1 := e2e.WsReader(suite.testState.ws1) + received2 := e2e.WsReader(suite.testState.ws2) + + ffi := simpleStorageFFI(contractVersion()) + networkName := ffi.Name + "-shared" + suite.T().Logf("Interface local name: %s", ffi.Name) + suite.T().Logf("Interface network name: %s", networkName) + suite.T().Logf("Interface version: %s", ffi.Version) + + ffiResult, err := suite.testState.client1.CreateFFI(suite.T(), ffi, false) + assert.NoError(suite.T(), err) + + e2e.WaitForEvent(suite.T(), received1, core.EventTypeContractInterfaceConfirmed, ffiResult.ID) + + // Delete and recreate + suite.testState.client1.DeleteFFI(suite.T(), ffiResult.ID, 204) + ffiResult, err = suite.testState.client1.CreateFFI(suite.T(), ffi, false) + assert.NoError(suite.T(), err) + + e2e.WaitForEvent(suite.T(), received1, core.EventTypeContractInterfaceConfirmed, ffiResult.ID) + + suite.testState.client1.PublishFFI(suite.T(), ffi.Name, ffi.Version, networkName, false) + + e2e.WaitForMessageConfirmed(suite.T(), received1, core.MessageTypeDefinition) + e2e.WaitForEvent(suite.T(), received2, core.EventTypeContractInterfaceConfirmed, ffiResult.ID) + + ffiReceived := suite.testState.client2.GetFFI(suite.T(), networkName, ffi.Version) + assert.Equal(suite.T(), networkName, ffiReceived.Name) + assert.Equal(suite.T(), networkName, ffiReceived.NetworkName) + + // Cannot delete published interfaces + suite.testState.client1.DeleteFFI(suite.T(), ffiResult.ID, 409) + + APIName := fftypes.NewUUID().String() + networkName = APIName + "-shared" + suite.T().Logf("API local name: %s", APIName) + suite.T().Logf("API network name: %s", networkName) + + apiResult, err := suite.testState.client1.CreateContractAPI(suite.T(), APIName, &fftypes.FFIReference{ + ID: ffiResult.ID, + }, nil, false) + assert.NoError(suite.T(), err) + suite.T().Logf("API ID: %s", apiResult.ID) + + e2e.WaitForEvent(suite.T(), received1, core.EventTypeContractAPIConfirmed, apiResult.ID) + + // Delete and recreate + suite.testState.client1.DeleteContractAPI(suite.T(), APIName, 204) + apiResult, err = suite.testState.client1.CreateContractAPI(suite.T(), APIName, &fftypes.FFIReference{ + ID: ffiResult.ID, + }, nil, false) + assert.NoError(suite.T(), err) + + suite.testState.client1.PublishContractAPI(suite.T(), APIName, networkName, false) + + e2e.WaitForMessageConfirmed(suite.T(), received1, core.MessageTypeDefinition) + e2e.WaitForEvent(suite.T(), received2, core.EventTypeContractAPIConfirmed, apiResult.ID) + + apiReceived := suite.testState.client2.GetContractAPI(suite.T(), networkName) + assert.Equal(suite.T(), 
networkName, apiReceived.Name) + assert.Equal(suite.T(), networkName, apiReceived.NetworkName) + + // Cannot delete published APIs + suite.testState.client1.DeleteContractAPI(suite.T(), APIName, 409) +} diff --git a/test/e2e/multiparty/ethereum_token_contract.go b/test/e2e/multiparty/ethereum_token_contract.go index 14eaa525e9..1aa88e2667 100644 --- a/test/e2e/multiparty/ethereum_token_contract.go +++ b/test/e2e/multiparty/ethereum_token_contract.go @@ -379,15 +379,17 @@ var expectedERC1155Methods = fftypes.JSONAnyPtr(`{ type EthereumTokenContractTestSuite struct { suite.Suite testState *testState + connector string contract string expectedMethods *fftypes.JSONAny } func (suite *EthereumTokenContractTestSuite) SetupSuite() { stack := e2e.ReadStack(suite.T()) + suite.connector = stack.TokenProviders[0] suite.contract = "erc20/ERC20OpenZeppelin.json" suite.expectedMethods = expectedERC20Methods - if stack.TokenProviders[0] == "erc1155" { + if suite.connector == "erc1155" { suite.contract = "erc1155/ERC1155Sample.json" suite.expectedMethods = expectedERC1155Methods } @@ -410,13 +412,14 @@ func (suite *EthereumTokenContractTestSuite) TestTokensWithInterface() { suite.T().Logf("contract: %s", suite.contract) contractAddress := deployContract(suite.T(), suite.testState.stackName, suite.contract) contractJSON := readContractJSON(suite.T(), suite.contract) + version := contractVersion() ffi := suite.testState.client1.GenerateFFIFromABI(suite.T(), &fftypes.FFIGenerationRequest{ - Name: "ERC20", - Version: contractVersion, + Name: "token_contract_test", + Version: version, Input: fftypes.JSONAnyPtr(`{"abi":` + contractJSON.GetObjectArray("abi").String() + `}`), }) - _, err := suite.testState.client1.CreateFFI(suite.T(), ffi) + _, err := suite.testState.client1.CreateFFI(suite.T(), ffi, true) assert.NoError(suite.T(), err) poolName := fmt.Sprintf("pool_%s", e2e.RandomName(suite.T())) @@ -427,11 +430,11 @@ func (suite *EthereumTokenContractTestSuite) TestTokensWithInterface() { "address": contractAddress, }, Interface: &fftypes.FFIReference{ - Name: "ERC20", - Version: contractVersion, + Name: "token_contract_test", + Version: version, }, } - poolResp := suite.testState.client1.CreateTokenPool(suite.T(), pool, false) + poolResp := suite.testState.client1.CreateTokenPool(suite.T(), pool, true, false) e2e.WaitForEvent(suite.T(), received1, core.EventTypePoolConfirmed, poolResp.ID) poolResp = suite.testState.client1.GetTokenPool(suite.T(), poolResp.ID) @@ -467,4 +470,13 @@ func (suite *EthereumTokenContractTestSuite) TestTokensWithInterface() { e2e.ValidateAccountBalances(suite.T(), suite.testState.client1, poolResp.ID, "", map[string]int64{ suite.testState.org1key.Value: 0, }) + + if suite.connector == "erc1155" { + // Create another pool and pass startId/endId explicitly + pool.Name = fmt.Sprintf("pool_%s", e2e.RandomName(suite.T())) + pool.Config["startId"] = "0x1" // intentionally does not match a pool generated by the factory + pool.Config["endId"] = "0x1" + poolResp = suite.testState.client1.CreateTokenPool(suite.T(), pool, true, false) + e2e.WaitForEvent(suite.T(), received1, core.EventTypePoolConfirmed, poolResp.ID) + } } diff --git a/test/e2e/multiparty/tokens.go b/test/e2e/multiparty/tokens.go index e7817d3bf1..f05e803264 100644 --- a/test/e2e/multiparty/tokens.go +++ b/test/e2e/multiparty/tokens.go @@ -61,7 +61,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { Config: fftypes.JSONObject{}, } - poolResp := suite.testState.client1.CreateTokenPool(suite.T(), pool, false) + 
poolResp := suite.testState.client1.CreateTokenPool(suite.T(), pool, true, false) poolID := poolResp.ID e2e.WaitForEvent(suite.T(), received1, core.EventTypePoolConfirmed, poolID) @@ -72,15 +72,17 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { assert.Equal(suite.T(), poolName, pools[0].Name) assert.Equal(suite.T(), core.TokenTypeFungible, pools[0].Type) assert.NotEmpty(suite.T(), pools[0].Locator) + assert.NotNil(suite.T(), pools[0].Message) e2e.WaitForEvent(suite.T(), received2, core.EventTypePoolConfirmed, poolID) - pools = suite.testState.client1.GetTokenPools(suite.T(), suite.testState.startTime) + pools = suite.testState.client2.GetTokenPools(suite.T(), suite.testState.startTime) assert.Equal(suite.T(), 1, len(pools)) assert.Equal(suite.T(), suite.testState.namespace, pools[0].Namespace) assert.Equal(suite.T(), suite.connector, pools[0].Connector) assert.Equal(suite.T(), poolName, pools[0].Name) assert.Equal(suite.T(), core.TokenTypeFungible, pools[0].Type) assert.NotEmpty(suite.T(), pools[0].Locator) + assert.NotNil(suite.T(), pools[0].Message) approval := &core.TokenApprovalInput{ TokenApproval: core.TokenApproval{ @@ -216,6 +218,9 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { assert.Equal(suite.T(), *poolID, *accountPools[0].Pool) accountPools = suite.testState.client2.GetTokenAccountPools(suite.T(), suite.testState.org2key.Value) assert.Equal(suite.T(), *poolID, *accountPools[0].Pool) + + // Cannot delete pools in multiparty mode + suite.testState.client1.DeleteTokenPool(suite.T(), poolID, 409) } func (suite *TokensTestSuite) TestE2ENonFungibleTokensSync() { @@ -231,11 +236,12 @@ func (suite *TokensTestSuite) TestE2ENonFungibleTokensSync() { Config: fftypes.JSONObject{}, } - poolOut := suite.testState.client1.CreateTokenPool(suite.T(), pool, true) + poolOut := suite.testState.client1.CreateTokenPool(suite.T(), pool, true, true) assert.Equal(suite.T(), suite.testState.namespace, poolOut.Namespace) assert.Equal(suite.T(), poolName, poolOut.Name) assert.Equal(suite.T(), core.TokenTypeNonFungible, poolOut.Type) assert.NotEmpty(suite.T(), poolOut.Locator) + assert.NotNil(suite.T(), poolOut.Message) poolID := poolOut.ID @@ -247,6 +253,7 @@ func (suite *TokensTestSuite) TestE2ENonFungibleTokensSync() { assert.Equal(suite.T(), poolName, pools[0].Name) assert.Equal(suite.T(), core.TokenTypeNonFungible, pools[0].Type) assert.NotEmpty(suite.T(), pools[0].Locator) + assert.NotNil(suite.T(), pools[0].Message) approval := &core.TokenApprovalInput{ TokenApproval: core.TokenApproval{ @@ -371,4 +378,53 @@ func (suite *TokensTestSuite) TestE2ENonFungibleTokensSync() { assert.Equal(suite.T(), *poolID, *accountPools[0].Pool) accountPools = suite.testState.client2.GetTokenAccountPools(suite.T(), suite.testState.org2key.Value) assert.Equal(suite.T(), *poolID, *accountPools[0].Pool) + + // Cannot delete published pools + suite.testState.client1.DeleteTokenPool(suite.T(), poolID, 409) +} + +func (suite *TokensTestSuite) TestE2ETokenPoolPublish() { + received1 := e2e.WsReader(suite.testState.ws1) + received2 := e2e.WsReader(suite.testState.ws2) + + poolName := fmt.Sprintf("pool_%s", e2e.RandomName(suite.T())) + localName := poolName + "-local" + networkName := poolName + "-shared" + suite.T().Logf("Pool local name: %s", localName) + suite.T().Logf("Pool network name: %s", networkName) + + pool := &core.TokenPool{ + Name: localName, + Type: core.TokenTypeFungible, + Config: fftypes.JSONObject{}, + } + + poolResp := suite.testState.client1.CreateTokenPool(suite.T(), pool, 
false, false) + poolID := poolResp.ID + + e2e.WaitForEvent(suite.T(), received1, core.EventTypePoolConfirmed, poolID) + pools := suite.testState.client1.GetTokenPools(suite.T(), suite.testState.startTime) + assert.Equal(suite.T(), 1, len(pools)) + assert.Equal(suite.T(), suite.testState.namespace, pools[0].Namespace) + assert.Equal(suite.T(), suite.connector, pools[0].Connector) + assert.Equal(suite.T(), localName, pools[0].Name) // sending party uses local name + assert.Equal(suite.T(), core.TokenTypeFungible, pools[0].Type) + assert.NotEmpty(suite.T(), pools[0].Locator) + assert.Nil(suite.T(), pools[0].Message) + + suite.testState.client1.PublishTokenPool(suite.T(), poolID, networkName, false) + + e2e.WaitForMessageConfirmed(suite.T(), received1, core.MessageTypeDefinition) + e2e.WaitForEvent(suite.T(), received2, core.EventTypePoolConfirmed, poolID) + pools = suite.testState.client2.GetTokenPools(suite.T(), suite.testState.startTime) + assert.Equal(suite.T(), 1, len(pools)) + assert.Equal(suite.T(), suite.testState.namespace, pools[0].Namespace) + assert.Equal(suite.T(), suite.connector, pools[0].Connector) + assert.Equal(suite.T(), networkName, pools[0].Name) // receiving party gets network name + assert.Equal(suite.T(), core.TokenTypeFungible, pools[0].Type) + assert.NotEmpty(suite.T(), pools[0].Locator) + assert.NotNil(suite.T(), pools[0].Message) + + // Cannot delete published pools + suite.testState.client1.DeleteTokenPool(suite.T(), poolID, 409) } diff --git a/test/e2e/multiparty/tokens_remote_name.go b/test/e2e/multiparty/tokens_remote_name.go index c6ece024e9..8af34688fe 100644 --- a/test/e2e/multiparty/tokens_remote_name.go +++ b/test/e2e/multiparty/tokens_remote_name.go @@ -89,7 +89,7 @@ func (suite *TokensRemoteNameTestSuite) TestE2EFungibleTokensWithRemoteNameAsync Config: fftypes.JSONObject{}, } - poolResp := client1.CreateTokenPool(suite.T(), pool, false) + poolResp := client1.CreateTokenPool(suite.T(), pool, true, false) poolID := poolResp.ID e2e.WaitForEvent(suite.T(), received1, core.EventTypePoolConfirmed, poolID) diff --git a/test/e2e/runners/ethereum_gateway_test.go b/test/e2e/runners/ethereum_gateway_test.go index 91afbd057e..00aca2d4c2 100644 --- a/test/e2e/runners/ethereum_gateway_test.go +++ b/test/e2e/runners/ethereum_gateway_test.go @@ -25,6 +25,17 @@ import ( func TestEthereumGatewayE2ESuite(t *testing.T) { suite.Run(t, new(gateway.TokensTestSuite)) - suite.Run(t, new(gateway.EthereumContractTestSuite)) + suite.Run(t, new(gateway.EthereumCouponTestSuite)) + suite.Run(t, new(gateway.EthereumSimpleStorageTestSuite)) + suite.Run(t, new(gateway.EthereumRevertTestSuite)) + suite.Run(t, new(gateway.TokensOnlyTestSuite)) +} + +func TestEthereumGatewayLegacyEthE2ESuite(t *testing.T) { + // Note EthereumRevertTestSuite does not work with legacy EthConnect, as the + // submissionRejected boolean is only supported by the EVMConnect (FFTM) generation. + suite.Run(t, new(gateway.TokensTestSuite)) + suite.Run(t, new(gateway.EthereumCouponTestSuite)) + suite.Run(t, new(gateway.EthereumSimpleStorageTestSuite)) suite.Run(t, new(gateway.TokensOnlyTestSuite)) }
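
The new deployTestContractFromCompiledJSON helper above does two things: it shells out to `ff deploy ethereum` for the address, and it re-reads the solc combined-JSON fixture so the ABI can be handed straight to GenerateFFIFromABI (as the revert suite does). Below is a minimal standalone sketch of just that parsing step, for illustration only: it substitutes json.RawMessage for fftypes.JSONAny, drops testify, and uses a hypothetical fixture path rather than the tests' ../../data/contracts tree.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Shape of the solc combined-JSON output used by the e2e fixtures,
// keyed by "<source file>:<ContractName>" (mirrors the helper's solcJSON type).
type solcCombinedJSON struct {
	Contracts map[string]struct {
		ABI json.RawMessage `json:"abi"`
	} `json:"contracts"`
}

func main() {
	// Hypothetical path for illustration; the tests resolve paths under ../../data/contracts/
	b, err := os.ReadFile("reverter.json")
	if err != nil {
		panic(err)
	}

	var compiled solcCombinedJSON
	if err := json.Unmarshal(b, &compiled); err != nil {
		panic(err)
	}

	// Each fixture contains a single contract, so take the first non-empty ABI found -
	// the same approach the helper takes before returning it alongside the deployed address.
	for name, c := range compiled.Contracts {
		if len(c.ABI) > 0 {
			fmt.Printf("contract %s ABI: %s\n", name, c.ABI)
			break
		}
	}
}

Feeding the extracted ABI back through an FFI generation request (rather than hand-writing the FFI, as the SimpleStorage suite does) is what lets EthereumRevertTestSuite pick up the goBang method generically from whatever the compiled fixture contains.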