From 120321eae3b34e8ad0f0545a0d9b99a3c7e16474 Mon Sep 17 00:00:00 2001
From: Benjamin DENEUX
Date: Fri, 21 Jun 2024 17:24:28 +0200
Subject: [PATCH 1/4] fix: oneOf generation with title set

---
 pkg/codegen/oneof.go | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/pkg/codegen/oneof.go b/pkg/codegen/oneof.go
index 973d13b..61d56b6 100644
--- a/pkg/codegen/oneof.go
+++ b/pkg/codegen/oneof.go
@@ -14,18 +14,19 @@ func generateFieldsFromOneOf(oneOf []*schemas.JSONSchema, typePrefix string) []j
 	fields := []jen.Code{}
 	for _, schema := range oneOf {
 		name := schema.Title
 
-		if schema.Title == "" {
-			if len(schema.Properties) != 1 {
-				panic(fmt.Errorf("cannot determine the name of the field %v", schema))
-			}
-			for k, prop := range schema.Properties {
+		if schema.Title == "" && len(schema.Properties) != 1 {
+			panic(fmt.Errorf("cannot determine the name of the field %v", schema))
+		}
+
+		for k, prop := range schema.Properties {
+			if schema.Title == "" {
 				name = k
+			}
 
-				typeName := typePrefix + strcase.ToCamel(k)
+			typeName := typePrefix + strcase.ToCamel(k)
 
-				RegisterDefinition(typeName, prop)
-			}
+			RegisterDefinition(typeName, prop)
 		}
 
 		RegisterDefinitions(schema.Definitions)

From ab7ebb099e4f8aa2dd359634d547bece9ddda122 Mon Sep 17 00:00:00 2001
From: srdtrk
Date: Sat, 22 Jun 2024 01:57:04 +0400
Subject: [PATCH 2/4] fix: patched json key issue

---
 pkg/codegen/oneof.go      | 28 +++++++++++++++++++++-------
 pkg/codegen/properties.go | 13 +++++++------
 2 files changed, 28 insertions(+), 13 deletions(-)

diff --git a/pkg/codegen/oneof.go b/pkg/codegen/oneof.go
index 61d56b6..8c267c9 100644
--- a/pkg/codegen/oneof.go
+++ b/pkg/codegen/oneof.go
@@ -13,18 +13,19 @@ func generateFieldsFromOneOf(oneOf []*schemas.JSONSchema, typePrefix string) []j
 	ptrFalse := false
 	fields := []jen.Code{}
 	for _, schema := range oneOf {
-		name := schema.Title
-
 		if schema.Title == "" && len(schema.Properties) != 1 {
 			panic(fmt.Errorf("cannot determine the name of the field %v", schema))
 		}
 
+		var (
+			name    string
+			jsonKey string
+		)
+		// NOTE: there is only one property in this map
 		for k, prop := range schema.Properties {
-			if schema.Title == "" {
-				name = k
-			}
+			name, jsonKey = validatedTitleAndJSONKey(schema.Title, k)
 
-			typeName := typePrefix + strcase.ToCamel(k)
+			typeName := typePrefix + strcase.ToCamel(jsonKey)
 
 			RegisterDefinition(typeName, prop)
 		}
@@ -34,7 +35,20 @@ func generateFieldsFromOneOf(oneOf []*schemas.JSONSchema, typePrefix string) []j
 		// add comment
 		fields = append(fields, jen.Comment(schema.Description))
 		// add field
-		fields = append(fields, generateFieldFromSchema(name, schema, &ptrFalse, typePrefix, true))
+		fields = append(fields, generateFieldFromSchema(name, jsonKey, schema, &ptrFalse, typePrefix, true))
 	}
 	return fields
 }
+
+func validatedTitleAndJSONKey(title, key string) (string, string) {
+	if title == "" && key == "" {
+		panic(fmt.Errorf("cannot determine the name of the field"))
+	}
+	if title == "" {
+		title = strcase.ToCamel(key)
+	}
+	if key == "" {
+		key = strcase.ToSnake(title)
+	}
+	return title, key
+}
diff --git a/pkg/codegen/properties.go b/pkg/codegen/properties.go
index 6c772c9..9faae3f 100644
--- a/pkg/codegen/properties.go
+++ b/pkg/codegen/properties.go
@@ -18,16 +18,17 @@ func generateFieldsFromProperties(props map[string]*schemas.JSONSchema, useTags
 		// add comment
 		fields = append(fields, jen.Comment(schema.Description))
 		// add field
-		fields = append(fields, generateFieldFromSchema(name, schema, nil, "", useTags))
+		fields = append(fields, generateFieldFromSchema(name, name, schema, nil, "", useTags))
 	}
 	return fields
 }
 
-func generateFieldFromSchema(name string, schema *schemas.JSONSchema, required *bool, typePrefix string, useTags bool) jen.Code {
-	if name == "" {
-		name = schema.Title
+func generateFieldFromSchema(name, jsonKey string, schema *schemas.JSONSchema, required *bool, typePrefix string, useTags bool) jen.Code {
+	if name == "" || jsonKey == "" {
+		panic(fmt.Errorf("cannot determine the name of the field for schema %v", schema))
 	}
 	pascalName := strcase.ToCamel(name)
+	snakeName := strcase.ToSnake(jsonKey)
 
 	typeStr, err := getType(pascalName, schema, required, typePrefix, true)
 	if err != nil {
@@ -37,9 +38,9 @@ func generateFieldFromSchema(name string, schema *schemas.JSONSchema, required *
 	if useTags {
 		tags := map[string]string{}
 		if strings.HasPrefix(typeStr, "*") {
-			tags["json"] = name + ",omitempty"
+			tags["json"] = snakeName + ",omitempty"
 		} else {
-			tags["json"] = name
+			tags["json"] = snakeName
 		}
 
 		return jen.Id(pascalName).Op(typeStr).Tag(tags)

From b1ce1fefb5571ee9cecfab70403b763c472ab405 Mon Sep 17 00:00:00 2001
From: srdtrk
Date: Sat, 22 Jun 2024 01:58:22 +0400
Subject: [PATCH 3/4] test: added objectarium to integration tests

---
 integration_test/integration_test.go |    4 +-
 .../testdata/axone-objectarium.json  | 1102 +++++++++++++++++
 2 files changed, 1105 insertions(+), 1 deletion(-)
 create mode 100644 integration_test/testdata/axone-objectarium.json

diff --git a/integration_test/integration_test.go b/integration_test/integration_test.go
index 304ade0..dfb719b 100644
--- a/integration_test/integration_test.go
+++ b/integration_test/integration_test.go
@@ -92,6 +92,7 @@ func (s *MySuite) TestMessageComposer() {
 	s.GenerateMessageTypesTest("testdata/cw2981-royalties.json")
 	s.GenerateMessageTypesTest("testdata/ics721.json")
 	s.GenerateMessageTypesTest("testdata/dao-dao-core.json")
+	s.GenerateMessageTypesTest("testdata/axone-objectarium.json")
 }
 
 func (s *MySuite) TestQueryClient() {
@@ -103,7 +104,8 @@ func (s *MySuite) TestQueryClient() {
 	s.GenerateQueryClientTest("testdata/cw721-base.json")
 	s.GenerateQueryClientTest("testdata/cw2981-royalties.json")
 	s.GenerateQueryClientTest("testdata/ics721.json")
-	s.GenerateMessageTypesTest("testdata/dao-dao-core.json")
+	s.GenerateQueryClientTest("testdata/dao-dao-core.json")
+	s.GenerateQueryClientTest("testdata/axone-objectarium.json")
 }
 
 func (s *MySuite) TestInterchaintestScaffold() {

diff --git a/integration_test/testdata/axone-objectarium.json b/integration_test/testdata/axone-objectarium.json
new file mode 100644
index 0000000..6e53f16
--- /dev/null
+++ b/integration_test/testdata/axone-objectarium.json
@@ -0,0 +1,1102 @@
+{
+  "contract_name": "axone-objectarium",
+  "contract_version": "5.0.0",
+  "idl_version": "1.0.0",
+  "instantiate": {
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "InstantiateMsg",
+    "description": "Instantiate messages",
+    "type": "object",
+    "required": [
+      "bucket"
+    ],
+    "properties": {
+      "bucket": {
+        "description": "The name of the bucket. The name could not be empty or contains whitespaces. 
If name contains whitespace, they will be removed.", + "type": "string" + }, + "config": { + "description": "The configuration of the bucket.", + "default": { + "accepted_compression_algorithms": [ + "passthrough", + "snappy", + "lzma" + ], + "hash_algorithm": "sha256" + }, + "allOf": [ + { + "$ref": "#/definitions/BucketConfig" + } + ] + }, + "limits": { + "description": "The limits of the bucket.", + "default": { + "max_object_pins": null, + "max_object_size": null, + "max_objects": null, + "max_total_size": null + }, + "allOf": [ + { + "$ref": "#/definitions/BucketLimits" + } + ] + }, + "pagination": { + "description": "The configuration for paginated query.", + "default": { + "default_page_size": 10, + "max_page_size": 30 + }, + "allOf": [ + { + "$ref": "#/definitions/PaginationConfig" + } + ] + } + }, + "additionalProperties": false, + "definitions": { + "BucketConfig": { + "description": "BucketConfig is the type of the configuration of a bucket.\n\nThe configuration is set at the instantiation of the bucket, and is immutable and cannot be changed. The configuration is optional and if not set, the default configuration is used.", + "type": "object", + "properties": { + "accepted_compression_algorithms": { + "description": "The acceptable compression algorithms for the objects in the bucket. If this parameter is not set, then all compression algorithms are accepted. If this parameter is set, then only the compression algorithms in the array are accepted.\n\nWhen an object is stored in the bucket without a specified compression algorithm, the first algorithm in the array is used. Therefore, the order of the algorithms in the array is significant. Typically, the most efficient compression algorithm, such as the NoCompression algorithm, should be placed first in the array.\n\nAny attempt to store an object using a different compression algorithm than the ones specified here will fail.", + "default": [ + "passthrough", + "snappy", + "lzma" + ], + "type": "array", + "items": { + "$ref": "#/definitions/CompressionAlgorithm" + } + }, + "hash_algorithm": { + "description": "The algorithm used to hash the content of the objects to generate the id of the objects. 
The algorithm is optional and if not set, the default algorithm is used.\n\nThe default algorithm is Sha256 if not set.", + "default": "sha256", + "allOf": [ + { + "$ref": "#/definitions/HashAlgorithm" + } + ] + } + }, + "additionalProperties": false + }, + "BucketLimits": { + "description": "BucketLimits is the type of the limits of a bucket.\n\nThe limits are optional and if not set, there is no limit.", + "type": "object", + "properties": { + "max_object_pins": { + "description": "The maximum number of pins in the bucket for an object.", + "anyOf": [ + { + "$ref": "#/definitions/Uint128" + }, + { + "type": "null" + } + ] + }, + "max_object_size": { + "description": "The maximum size of the objects in the bucket.", + "anyOf": [ + { + "$ref": "#/definitions/Uint128" + }, + { + "type": "null" + } + ] + }, + "max_objects": { + "description": "The maximum number of objects in the bucket.", + "anyOf": [ + { + "$ref": "#/definitions/Uint128" + }, + { + "type": "null" + } + ] + }, + "max_total_size": { + "description": "The maximum total size of the objects in the bucket.", + "anyOf": [ + { + "$ref": "#/definitions/Uint128" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "CompressionAlgorithm": { + "description": "CompressionAlgorithm is an enumeration that defines the different compression algorithms supported for compressing the content of objects. The compression algorithm specified here are relevant algorithms for compressing data on-chain, which means that they are fast to compress and decompress, and have a low computational cost.\n\nThe order of the compression algorithms is based on their estimated computational cost (quite opinionated) during both compression and decompression, ranging from the lowest to the highest. This particular order is utilized to establish the default compression algorithm for storing an object.", + "oneOf": [ + { + "title": "Passthrough", + "description": "Represents no compression algorithm. The object is stored as is without any compression.", + "type": "string", + "enum": [ + "passthrough" + ] + }, + { + "title": "Snappy", + "description": "Represents the Snappy algorithm. Snappy is a compression/decompression algorithm that does not aim for maximum compression. Instead, it aims for very high speeds and reasonable compression.\n\nSee [the snappy web page](https://google.github.io/snappy/) for more information.", + "type": "string", + "enum": [ + "snappy" + ] + }, + { + "title": "Lzma", + "description": "Represents the LZMA algorithm. LZMA is a lossless data compression/decompression algorithm that features a high compression ratio and a variable compression-dictionary size up to 4 GB.\n\nSee [the LZMA wiki page](https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Markov_chain_algorithm) for more information.", + "type": "string", + "enum": [ + "lzma" + ] + } + ] + }, + "HashAlgorithm": { + "description": "HashAlgorithm is an enumeration that defines the different hash algorithms supported for hashing the content of objects.", + "oneOf": [ + { + "title": "MD5", + "description": "Represents the MD5 algorithm. MD5 is a widely used cryptographic hash function that produces a 128-bit hash value. The computational cost of MD5 is relatively low compared to other hash functions, but its short hash length makes it easier to find hash collisions. 
It is now considered insecure for cryptographic purposes, but can still used in non-security contexts.\n\nMD5 hashes are stored on-chain as 32 hexadecimal characters.\n\nSee [the MD5 Wikipedia page](https://en.wikipedia.org/wiki/MD5) for more information.", + "type": "string", + "enum": [ + "m_d5" + ] + }, + { + "title": "SHA1", + "description": "Represents the SHA-224 algorithm. SHA-224 is a variant of the SHA-2 family of hash functions that produces a 224-bit hash value. It is similar to SHA-256, but with a shorter output size. The computational cost of SHA-224 is moderate, and its relatively short hash length makes it easier to store and transmit.\n\nSHA-224 hashes are stored on-chain as 56 hexadecimal characters.\n\nSee [the SHA-2 Wikipedia page](https://en.wikipedia.org/wiki/SHA-2) for more information.", + "type": "string", + "enum": [ + "sha224" + ] + }, + { + "title": "SHA256", + "description": "Represents the SHA-256 algorithm. SHA-256 is a member of the SHA-2 family of hash functions that produces a 256-bit hash value. It is widely used in cryptography and other security-related applications. The computational cost of SHA-256 is moderate, and its hash length strikes a good balance between security and convenience.\n\nSHA-256 hashes are stored on-chain as 64 hexadecimal characters.\n\nSee [the SHA-2 Wikipedia page](https://en.wikipedia.org/wiki/SHA-2) for more information.", + "type": "string", + "enum": [ + "sha256" + ] + }, + { + "title": "SHA384", + "description": "Represents the SHA-384 algorithm. SHA-384 is a variant of the SHA-2 family of hash functions that produces a 384-bit hash value. It is similar to SHA-512, but with a shorter output size. The computational cost of SHA-384 is relatively high, but its longer hash length provides better security against hash collisions.\n\nSHA-384 hashes are stored on-chain as 96 hexadecimal characters.\n\nSee [the SHA-2 Wikipedia page](https://en.wikipedia.org/wiki/SHA-2) for more information.", + "type": "string", + "enum": [ + "sha384" + ] + }, + { + "title": "SHA512", + "description": "Represents the SHA-512 algorithm. SHA-512 is a member of the SHA-2 family of hash functions that produces a 512-bit hash value. It is widely used in cryptography and other security-related applications. The computational cost of SHA-512 is relatively high, but its longer hash length provides better security against hash collisions.\n\nSHA-512 hashes are stored on-chain as 128 hexadecimal characters.\n\nSee [the SHA-2 Wikipedia page](https://en.wikipedia.org/wiki/SHA-2) for more information.", + "type": "string", + "enum": [ + "sha512" + ] + } + ] + }, + "PaginationConfig": { + "description": "PaginationConfig is the type carrying configuration for paginated queries.\n\nThe fields are optional and if not set, there is a default configuration.", + "type": "object", + "properties": { + "default_page_size": { + "description": "The default number of elements in a page.\n\nShall be less or equal than `max_page_size`. Default to '10' if not set.", + "default": 10, + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "max_page_size": { + "description": "The maximum elements a page can contain.\n\nShall be less than `u32::MAX - 1`. 
Default to '30' if not set.", + "default": 30, + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Uint128": { + "description": "A thin wrapper around u128 that is using strings for JSON encoding/decoding, such that the full u128 range can be used for clients that convert JSON numbers to floats, like JavaScript and jq.\n\n# Examples\n\nUse `from` to create instances of this and `u128` to get the value out:\n\n``` # use cosmwasm_std::Uint128; let a = Uint128::from(123u128); assert_eq!(a.u128(), 123);\n\nlet b = Uint128::from(42u64); assert_eq!(b.u128(), 42);\n\nlet c = Uint128::from(70u32); assert_eq!(c.u128(), 70); ```", + "type": "string" + } + } + }, + "execute": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ExecuteMsg", + "description": "Execute messages", + "oneOf": [ + { + "title": "StoreObject", + "description": "StoreObject store an object to the bucket and make the sender the owner of the object. The object is referenced by the hash of its content and this value is returned. If the object is already stored, it is a no-op. It may be pinned though.\n\nThe \"pin\" parameter specifies if the object should be pinned for the sender. In such case, the object cannot be removed (forget) from the storage.\n\nThe \"compression_algorithm\" parameter specifies the algorithm for compressing the object before storing it in the storage, which is optional. If no algorithm is specified, the algorithm used is the first algorithm of the bucket configuration limits. Note that the chosen algorithm can save storage space, but it will increase CPU usage. Depending on the chosen compression algorithm and the achieved compression ratio, the gas cost of the operation will vary, either increasing or decreasing.", + "type": "object", + "required": [ + "store_object" + ], + "properties": { + "store_object": { + "type": "object", + "required": [ + "data", + "pin" + ], + "properties": { + "compression_algorithm": { + "description": "Specifies the compression algorithm to use when storing the object. If None, the first algorithm specified in the list of accepted compression algorithms of the bucket is used (see [BucketLimits::accepted_compression_algorithms]).", + "anyOf": [ + { + "$ref": "#/definitions/CompressionAlgorithm" + }, + { + "type": "null" + } + ] + }, + "data": { + "description": "The content of the object to store.", + "allOf": [ + { + "$ref": "#/definitions/Binary" + } + ] + }, + "pin": { + "description": "Specifies if the object should be pinned for the sender.", + "type": "boolean" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "title": "ForgetObject", + "description": "ForgetObject first unpin the object from the bucket for the considered sender, then remove it from the storage if it is not pinned anymore. If the object is pinned for other senders, it is not removed from the storage and an error is returned. If the object is not pinned for the sender, this is a no-op.", + "type": "object", + "required": [ + "forget_object" + ], + "properties": { + "forget_object": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "title": "PinObject", + "description": "PinObject pins the object in the bucket for the considered sender. If the object is already pinned for the sender, this is a no-op. 
While an object is pinned, it cannot be removed from the storage.", + "type": "object", + "required": [ + "pin_object" + ], + "properties": { + "pin_object": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "title": "UnpinObject", + "description": "UnpinObject unpins the object in the bucket for the considered sender. If the object is not pinned for the sender, this is a no-op. The object can be removed from the storage if it is not pinned anymore.", + "type": "object", + "required": [ + "unpin_object" + ], + "properties": { + "unpin_object": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ], + "definitions": { + "Binary": { + "description": "Binary is a wrapper around Vec to add base64 de/serialization with serde. It also adds some helper methods to help encode inline.\n\nThis is only needed as serde-json-{core,wasm} has a horrible encoding for Vec. See also .", + "type": "string" + }, + "CompressionAlgorithm": { + "description": "CompressionAlgorithm is an enumeration that defines the different compression algorithms supported for compressing the content of objects. The compression algorithm specified here are relevant algorithms for compressing data on-chain, which means that they are fast to compress and decompress, and have a low computational cost.\n\nThe order of the compression algorithms is based on their estimated computational cost (quite opinionated) during both compression and decompression, ranging from the lowest to the highest. This particular order is utilized to establish the default compression algorithm for storing an object.", + "oneOf": [ + { + "title": "Passthrough", + "description": "Represents no compression algorithm. The object is stored as is without any compression.", + "type": "string", + "enum": [ + "passthrough" + ] + }, + { + "title": "Snappy", + "description": "Represents the Snappy algorithm. Snappy is a compression/decompression algorithm that does not aim for maximum compression. Instead, it aims for very high speeds and reasonable compression.\n\nSee [the snappy web page](https://google.github.io/snappy/) for more information.", + "type": "string", + "enum": [ + "snappy" + ] + }, + { + "title": "Lzma", + "description": "Represents the LZMA algorithm. 
LZMA is a lossless data compression/decompression algorithm that features a high compression ratio and a variable compression-dictionary size up to 4 GB.\n\nSee [the LZMA wiki page](https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Markov_chain_algorithm) for more information.", + "type": "string", + "enum": [ + "lzma" + ] + } + ] + } + } + }, + "query": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "QueryMsg", + "description": "Query messages", + "oneOf": [ + { + "title": "Bucket", + "description": "Bucket returns the bucket information.", + "type": "object", + "required": [ + "bucket" + ], + "properties": { + "bucket": { + "type": "object", + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "title": "Object", + "description": "Object returns the object information with the given id.", + "type": "object", + "required": [ + "object" + ], + "properties": { + "object": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "description": "The id of the object to get.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "title": "Objects", + "description": "Objects returns the list of objects in the bucket with support for pagination.", + "type": "object", + "required": [ + "objects" + ], + "properties": { + "objects": { + "type": "object", + "properties": { + "address": { + "description": "The owner of the objects to get.", + "type": [ + "string", + "null" + ] + }, + "after": { + "description": "The point in the sequence to start returning objects.", + "type": [ + "string", + "null" + ] + }, + "first": { + "description": "The number of objects to return.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "title": "ObjectData", + "description": "ObjectData returns the content of the object with the given id.", + "type": "object", + "required": [ + "object_data" + ], + "properties": { + "object_data": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "description": "The id of the object to get.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "title": "ObjectPins", + "description": "ObjectPins returns the list of addresses that pinned the object with the given id with support for pagination.", + "type": "object", + "required": [ + "object_pins" + ], + "properties": { + "object_pins": { + "type": "object", + "required": [ + "id" + ], + "properties": { + "after": { + "description": "The point in the sequence to start returning pins.", + "type": [ + "string", + "null" + ] + }, + "first": { + "description": "The number of pins to return.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "id": { + "description": "The id of the object to get the pins for.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "migrate": null, + "sudo": null, + "responses": { + "bucket": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "BucketResponse", + "description": "BucketResponse is the response of the Bucket query.", + "type": "object", + "required": [ + "config", + "limits", + "name", + "pagination", + "stat" + ], + "properties": { + "config": { + "description": "The configuration of the bucket.", + "allOf": [ + { + "$ref": 
"#/definitions/BucketConfig" + } + ] + }, + "limits": { + "description": "The limits of the bucket.", + "allOf": [ + { + "$ref": "#/definitions/BucketLimits" + } + ] + }, + "name": { + "description": "The name of the bucket.", + "type": "string" + }, + "pagination": { + "description": "The configuration for paginated query.", + "allOf": [ + { + "$ref": "#/definitions/PaginationConfig" + } + ] + }, + "stat": { + "description": "The statistics of the bucket.", + "allOf": [ + { + "$ref": "#/definitions/BucketStat" + } + ] + } + }, + "additionalProperties": false, + "definitions": { + "BucketConfig": { + "description": "BucketConfig is the type of the configuration of a bucket.\n\nThe configuration is set at the instantiation of the bucket, and is immutable and cannot be changed. The configuration is optional and if not set, the default configuration is used.", + "type": "object", + "properties": { + "accepted_compression_algorithms": { + "description": "The acceptable compression algorithms for the objects in the bucket. If this parameter is not set, then all compression algorithms are accepted. If this parameter is set, then only the compression algorithms in the array are accepted.\n\nWhen an object is stored in the bucket without a specified compression algorithm, the first algorithm in the array is used. Therefore, the order of the algorithms in the array is significant. Typically, the most efficient compression algorithm, such as the NoCompression algorithm, should be placed first in the array.\n\nAny attempt to store an object using a different compression algorithm than the ones specified here will fail.", + "default": [ + "passthrough", + "snappy", + "lzma" + ], + "type": "array", + "items": { + "$ref": "#/definitions/CompressionAlgorithm" + } + }, + "hash_algorithm": { + "description": "The algorithm used to hash the content of the objects to generate the id of the objects. 
The algorithm is optional and if not set, the default algorithm is used.\n\nThe default algorithm is Sha256 if not set.", + "default": "sha256", + "allOf": [ + { + "$ref": "#/definitions/HashAlgorithm" + } + ] + } + }, + "additionalProperties": false + }, + "BucketLimits": { + "description": "BucketLimits is the type of the limits of a bucket.\n\nThe limits are optional and if not set, there is no limit.", + "type": "object", + "properties": { + "max_object_pins": { + "description": "The maximum number of pins in the bucket for an object.", + "anyOf": [ + { + "$ref": "#/definitions/Uint128" + }, + { + "type": "null" + } + ] + }, + "max_object_size": { + "description": "The maximum size of the objects in the bucket.", + "anyOf": [ + { + "$ref": "#/definitions/Uint128" + }, + { + "type": "null" + } + ] + }, + "max_objects": { + "description": "The maximum number of objects in the bucket.", + "anyOf": [ + { + "$ref": "#/definitions/Uint128" + }, + { + "type": "null" + } + ] + }, + "max_total_size": { + "description": "The maximum total size of the objects in the bucket.", + "anyOf": [ + { + "$ref": "#/definitions/Uint128" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BucketStat": { + "title": "BucketStat", + "description": "BucketStat is the type of the statistics of a bucket.", + "type": "object", + "required": [ + "compressed_size", + "object_count", + "size" + ], + "properties": { + "compressed_size": { + "description": "The total size of the objects contained in the bucket after compression.", + "allOf": [ + { + "$ref": "#/definitions/Uint128" + } + ] + }, + "object_count": { + "description": "The number of objects in the bucket.", + "allOf": [ + { + "$ref": "#/definitions/Uint128" + } + ] + }, + "size": { + "description": "The total size of the objects contained in the bucket.", + "allOf": [ + { + "$ref": "#/definitions/Uint128" + } + ] + } + }, + "additionalProperties": false + }, + "CompressionAlgorithm": { + "description": "CompressionAlgorithm is an enumeration that defines the different compression algorithms supported for compressing the content of objects. The compression algorithm specified here are relevant algorithms for compressing data on-chain, which means that they are fast to compress and decompress, and have a low computational cost.\n\nThe order of the compression algorithms is based on their estimated computational cost (quite opinionated) during both compression and decompression, ranging from the lowest to the highest. This particular order is utilized to establish the default compression algorithm for storing an object.", + "oneOf": [ + { + "title": "Passthrough", + "description": "Represents no compression algorithm. The object is stored as is without any compression.", + "type": "string", + "enum": [ + "passthrough" + ] + }, + { + "title": "Snappy", + "description": "Represents the Snappy algorithm. Snappy is a compression/decompression algorithm that does not aim for maximum compression. Instead, it aims for very high speeds and reasonable compression.\n\nSee [the snappy web page](https://google.github.io/snappy/) for more information.", + "type": "string", + "enum": [ + "snappy" + ] + }, + { + "title": "Lzma", + "description": "Represents the LZMA algorithm. 
LZMA is a lossless data compression/decompression algorithm that features a high compression ratio and a variable compression-dictionary size up to 4 GB.\n\nSee [the LZMA wiki page](https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Markov_chain_algorithm) for more information.", + "type": "string", + "enum": [ + "lzma" + ] + } + ] + }, + "HashAlgorithm": { + "description": "HashAlgorithm is an enumeration that defines the different hash algorithms supported for hashing the content of objects.", + "oneOf": [ + { + "title": "MD5", + "description": "Represents the MD5 algorithm. MD5 is a widely used cryptographic hash function that produces a 128-bit hash value. The computational cost of MD5 is relatively low compared to other hash functions, but its short hash length makes it easier to find hash collisions. It is now considered insecure for cryptographic purposes, but can still used in non-security contexts.\n\nMD5 hashes are stored on-chain as 32 hexadecimal characters.\n\nSee [the MD5 Wikipedia page](https://en.wikipedia.org/wiki/MD5) for more information.", + "type": "string", + "enum": [ + "m_d5" + ] + }, + { + "title": "SHA1", + "description": "Represents the SHA-224 algorithm. SHA-224 is a variant of the SHA-2 family of hash functions that produces a 224-bit hash value. It is similar to SHA-256, but with a shorter output size. The computational cost of SHA-224 is moderate, and its relatively short hash length makes it easier to store and transmit.\n\nSHA-224 hashes are stored on-chain as 56 hexadecimal characters.\n\nSee [the SHA-2 Wikipedia page](https://en.wikipedia.org/wiki/SHA-2) for more information.", + "type": "string", + "enum": [ + "sha224" + ] + }, + { + "title": "SHA256", + "description": "Represents the SHA-256 algorithm. SHA-256 is a member of the SHA-2 family of hash functions that produces a 256-bit hash value. It is widely used in cryptography and other security-related applications. The computational cost of SHA-256 is moderate, and its hash length strikes a good balance between security and convenience.\n\nSHA-256 hashes are stored on-chain as 64 hexadecimal characters.\n\nSee [the SHA-2 Wikipedia page](https://en.wikipedia.org/wiki/SHA-2) for more information.", + "type": "string", + "enum": [ + "sha256" + ] + }, + { + "title": "SHA384", + "description": "Represents the SHA-384 algorithm. SHA-384 is a variant of the SHA-2 family of hash functions that produces a 384-bit hash value. It is similar to SHA-512, but with a shorter output size. The computational cost of SHA-384 is relatively high, but its longer hash length provides better security against hash collisions.\n\nSHA-384 hashes are stored on-chain as 96 hexadecimal characters.\n\nSee [the SHA-2 Wikipedia page](https://en.wikipedia.org/wiki/SHA-2) for more information.", + "type": "string", + "enum": [ + "sha384" + ] + }, + { + "title": "SHA512", + "description": "Represents the SHA-512 algorithm. SHA-512 is a member of the SHA-2 family of hash functions that produces a 512-bit hash value. It is widely used in cryptography and other security-related applications. 
The computational cost of SHA-512 is relatively high, but its longer hash length provides better security against hash collisions.\n\nSHA-512 hashes are stored on-chain as 128 hexadecimal characters.\n\nSee [the SHA-2 Wikipedia page](https://en.wikipedia.org/wiki/SHA-2) for more information.", + "type": "string", + "enum": [ + "sha512" + ] + } + ] + }, + "PaginationConfig": { + "description": "PaginationConfig is the type carrying configuration for paginated queries.\n\nThe fields are optional and if not set, there is a default configuration.", + "type": "object", + "properties": { + "default_page_size": { + "description": "The default number of elements in a page.\n\nShall be less or equal than `max_page_size`. Default to '10' if not set.", + "default": 10, + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "max_page_size": { + "description": "The maximum elements a page can contain.\n\nShall be less than `u32::MAX - 1`. Default to '30' if not set.", + "default": 30, + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Uint128": { + "description": "A thin wrapper around u128 that is using strings for JSON encoding/decoding, such that the full u128 range can be used for clients that convert JSON numbers to floats, like JavaScript and jq.\n\n# Examples\n\nUse `from` to create instances of this and `u128` to get the value out:\n\n``` # use cosmwasm_std::Uint128; let a = Uint128::from(123u128); assert_eq!(a.u128(), 123);\n\nlet b = Uint128::from(42u64); assert_eq!(b.u128(), 42);\n\nlet c = Uint128::from(70u32); assert_eq!(c.u128(), 70); ```", + "type": "string" + } + } + }, + "object": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ObjectResponse", + "description": "ObjectResponse is the response of the Object query.", + "type": "object", + "required": [ + "compressed_size", + "compression_algorithm", + "id", + "is_pinned", + "owner", + "size" + ], + "properties": { + "compressed_size": { + "description": "The size of the object when compressed. If the object is not compressed, the value is the same as `size`.", + "allOf": [ + { + "$ref": "#/definitions/Uint128" + } + ] + }, + "compression_algorithm": { + "description": "The compression algorithm used to compress the content of the object.", + "allOf": [ + { + "$ref": "#/definitions/CompressionAlgorithm" + } + ] + }, + "id": { + "description": "The id of the object.", + "type": "string" + }, + "is_pinned": { + "description": "Tells if the object is pinned by at least one address.", + "type": "boolean" + }, + "owner": { + "description": "The owner of the object.", + "type": "string" + }, + "size": { + "description": "The size of the object.", + "allOf": [ + { + "$ref": "#/definitions/Uint128" + } + ] + } + }, + "additionalProperties": false, + "definitions": { + "CompressionAlgorithm": { + "description": "CompressionAlgorithm is an enumeration that defines the different compression algorithms supported for compressing the content of objects. The compression algorithm specified here are relevant algorithms for compressing data on-chain, which means that they are fast to compress and decompress, and have a low computational cost.\n\nThe order of the compression algorithms is based on their estimated computational cost (quite opinionated) during both compression and decompression, ranging from the lowest to the highest. 
This particular order is utilized to establish the default compression algorithm for storing an object.", + "oneOf": [ + { + "title": "Passthrough", + "description": "Represents no compression algorithm. The object is stored as is without any compression.", + "type": "string", + "enum": [ + "passthrough" + ] + }, + { + "title": "Snappy", + "description": "Represents the Snappy algorithm. Snappy is a compression/decompression algorithm that does not aim for maximum compression. Instead, it aims for very high speeds and reasonable compression.\n\nSee [the snappy web page](https://google.github.io/snappy/) for more information.", + "type": "string", + "enum": [ + "snappy" + ] + }, + { + "title": "Lzma", + "description": "Represents the LZMA algorithm. LZMA is a lossless data compression/decompression algorithm that features a high compression ratio and a variable compression-dictionary size up to 4 GB.\n\nSee [the LZMA wiki page](https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Markov_chain_algorithm) for more information.", + "type": "string", + "enum": [ + "lzma" + ] + } + ] + }, + "Uint128": { + "description": "A thin wrapper around u128 that is using strings for JSON encoding/decoding, such that the full u128 range can be used for clients that convert JSON numbers to floats, like JavaScript and jq.\n\n# Examples\n\nUse `from` to create instances of this and `u128` to get the value out:\n\n``` # use cosmwasm_std::Uint128; let a = Uint128::from(123u128); assert_eq!(a.u128(), 123);\n\nlet b = Uint128::from(42u64); assert_eq!(b.u128(), 42);\n\nlet c = Uint128::from(70u32); assert_eq!(c.u128(), 70); ```", + "type": "string" + } + } + }, + "object_data": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Binary", + "description": "Binary is a wrapper around Vec to add base64 de/serialization with serde. It also adds some helper methods to help encode inline.\n\nThis is only needed as serde-json-{core,wasm} has a horrible encoding for Vec. 
See also .", + "type": "string" + }, + "object_pins": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ObjectPinsResponse", + "description": "ObjectPinsResponse is the response of the GetObjectPins query.", + "type": "object", + "required": [ + "data", + "page_info" + ], + "properties": { + "data": { + "description": "The list of addresses that pinned the object.", + "type": "array", + "items": { + "type": "string" + } + }, + "page_info": { + "description": "The page information.", + "allOf": [ + { + "$ref": "#/definitions/PageInfo" + } + ] + } + }, + "additionalProperties": false, + "definitions": { + "PageInfo": { + "title": "PageInfo", + "description": "PageInfo is the page information returned for paginated queries.", + "type": "object", + "required": [ + "cursor", + "has_next_page" + ], + "properties": { + "cursor": { + "description": "The cursor to the next page.", + "type": "string" + }, + "has_next_page": { + "description": "Tells if there is a next page.", + "type": "boolean" + } + }, + "additionalProperties": false + } + } + }, + "objects": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ObjectsResponse", + "description": "ObjectsResponse is the response of the Objects query.", + "type": "object", + "required": [ + "data", + "page_info" + ], + "properties": { + "data": { + "description": "The list of objects in the bucket.", + "type": "array", + "items": { + "$ref": "#/definitions/ObjectResponse" + } + }, + "page_info": { + "description": "The page information.", + "allOf": [ + { + "$ref": "#/definitions/PageInfo" + } + ] + } + }, + "additionalProperties": false, + "definitions": { + "CompressionAlgorithm": { + "description": "CompressionAlgorithm is an enumeration that defines the different compression algorithms supported for compressing the content of objects. The compression algorithm specified here are relevant algorithms for compressing data on-chain, which means that they are fast to compress and decompress, and have a low computational cost.\n\nThe order of the compression algorithms is based on their estimated computational cost (quite opinionated) during both compression and decompression, ranging from the lowest to the highest. This particular order is utilized to establish the default compression algorithm for storing an object.", + "oneOf": [ + { + "title": "Passthrough", + "description": "Represents no compression algorithm. The object is stored as is without any compression.", + "type": "string", + "enum": [ + "passthrough" + ] + }, + { + "title": "Snappy", + "description": "Represents the Snappy algorithm. Snappy is a compression/decompression algorithm that does not aim for maximum compression. Instead, it aims for very high speeds and reasonable compression.\n\nSee [the snappy web page](https://google.github.io/snappy/) for more information.", + "type": "string", + "enum": [ + "snappy" + ] + }, + { + "title": "Lzma", + "description": "Represents the LZMA algorithm. 
LZMA is a lossless data compression/decompression algorithm that features a high compression ratio and a variable compression-dictionary size up to 4 GB.\n\nSee [the LZMA wiki page](https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Markov_chain_algorithm) for more information.", + "type": "string", + "enum": [ + "lzma" + ] + } + ] + }, + "ObjectResponse": { + "title": "ObjectResponse", + "description": "ObjectResponse is the response of the Object query.", + "type": "object", + "required": [ + "compressed_size", + "compression_algorithm", + "id", + "is_pinned", + "owner", + "size" + ], + "properties": { + "compressed_size": { + "description": "The size of the object when compressed. If the object is not compressed, the value is the same as `size`.", + "allOf": [ + { + "$ref": "#/definitions/Uint128" + } + ] + }, + "compression_algorithm": { + "description": "The compression algorithm used to compress the content of the object.", + "allOf": [ + { + "$ref": "#/definitions/CompressionAlgorithm" + } + ] + }, + "id": { + "description": "The id of the object.", + "type": "string" + }, + "is_pinned": { + "description": "Tells if the object is pinned by at least one address.", + "type": "boolean" + }, + "owner": { + "description": "The owner of the object.", + "type": "string" + }, + "size": { + "description": "The size of the object.", + "allOf": [ + { + "$ref": "#/definitions/Uint128" + } + ] + } + }, + "additionalProperties": false + }, + "PageInfo": { + "title": "PageInfo", + "description": "PageInfo is the page information returned for paginated queries.", + "type": "object", + "required": [ + "cursor", + "has_next_page" + ], + "properties": { + "cursor": { + "description": "The cursor to the next page.", + "type": "string" + }, + "has_next_page": { + "description": "Tells if there is a next page.", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "Uint128": { + "description": "A thin wrapper around u128 that is using strings for JSON encoding/decoding, such that the full u128 range can be used for clients that convert JSON numbers to floats, like JavaScript and jq.\n\n# Examples\n\nUse `from` to create instances of this and `u128` to get the value out:\n\n``` # use cosmwasm_std::Uint128; let a = Uint128::from(123u128); assert_eq!(a.u128(), 123);\n\nlet b = Uint128::from(42u64); assert_eq!(b.u128(), 42);\n\nlet c = Uint128::from(70u32); assert_eq!(c.u128(), 70); ```", + "type": "string" + } + } + } + }, + "description": "# Objectarium\n\nA [CosmWasm](https://cosmwasm.com/) Smart Contract which enables the storage of arbitrary unstructured [Objects](https://en.wikipedia.org/wiki/Object_storage) in any [Cosmos](https://cosmos.network/) blockchains.\n\n## Purpose\n\nThe smart contract serves as a robust storage solution, allowing for the storage of arbitrary `objects` on any blockchain within the [Cosmos blockchains](https://cosmos.network/) network, utilizing the [CosmWasm](https://cosmwasm.com/) framework. The key features of the contract include:\n\n**Versatile Data Storage:**\nThe contract is designed to accommodate any type of data, be it text, images, or complex data structures. This flexibility makes it an ideal choice for a wide range of decentralized applications (dApps) that require diverse storage needs.\n\n**On-chain Data:**\nBy design, the contract stores data on the blockchain, ensuring that it is immutable and publicly accessible. 
This is particularly useful for applications that require a high level of transparency, and also for any other smart contract that needs to store data on the blockchain.\n\n**Pinning and Unpinning:**\nOne unique feature is the ability to 'pin' and 'unpin' objects associated with a specific sender address. Pinning ensures that the object remains stored and accessible, while unpinning releases it from being permanently stored, offering a level of control over data persistence.\n\n**Object Removal:**\nThe contract also includes a 'forget' function, allowing for the removal of objects that are no longer pinned. This is particularly useful for managing storage costs and ensuring that only relevant data remains on the blockchain.\n\n**Cost Management:**\nFeatures like pinning, unpinning, and discarding objects offer a strategic way to control storage costs. Additionally, setting limits on contract size — for instance in terms of object count and their individual sizes — serves as a practical tool to regulate storage costs.\n\n## Rationale\n\nIn a sense, we can consider blockchains built on the [Cosmos L0](https://docs.cosmos.network/main) layer as decentralized databases, and their nature can be shaped and modeled through the smart contracts or modules. Given this, it provides a great opportunity to address the wide range of data management needs. One such important area is the management of unstructured, immutable data, which is written once but accessed frequently — commonly known as object storage. This is the primary focus of `axone-objectarium`: a specialized smart contract designed to offer a versatile and efficient approach to handling *on-chain*, *unstructured*, *immutable* data in a *decentralized* manner.\n\n## Terminology\n\n### Object\n\nIn the context of the `axone-objectarium` smart contract, an `object` refers to a piece of data stored on the blockchain. It can represent various types of information, such as documents, binary files, or any other digital content. Objects are immutable once stored and are identified by their cryptographic hash, which can be generated using algorithms like MD5 or SHA256. This ensures the integrity and security of the stored data, as any modification to the object would result in a different hash value.\n\n### Bucket\n\nThe smart contract is organized around buckets. A bucket represents a logical container within the `axone-objectarium` smart contract instance that groups related Objects together. It acts as a storage unit for Objects and provides a context for managing and organizing them. Each bucket has a unique name and is associated with a set of configurations and limits that define its behaviour and characteristics.\n\n### Pin\n\nPin refers to a mechanism that allows users to mark or \"pin\" specific objects within a bucket. Pinning an object serves as a way to ensure that the object remains in storage and cannot be removed (this is called \"forgotten\"). It provides protection and guarantees that the pinned object will persist in the protocol. When an object is pinned, it is associated with the identity (or sender) that performed the pinning action.\n\n## Usage\n\nThe unstructured nature of the data stored in the chain opens up a plethora of possibilities for decentralized applications that require this type of versatile storage.\n\n### In the AXONE protocol\n\nThe primary function of this smart contract within the AXONE protocol is to enable the persistence of governance rules, which are encoded in Prolog. 
These programs are stored in an immutable format within the protocol and can be referenced by their unique identifiers in situations where there is a need to refer to these rules.\n\n### In the wild world\n\nA plethora of possibilities opens up for decentralized applications (dApps) that require this kind of versatile storage. However, it's important to consider the following constraints: the data is immutable, the cost of recording the data is proportional to its size, and the data is publicly accessible.\n\n## Play\n\n### Instantiation\n\nThe `axone-objectarium` can be instantiated as follows, refer to the schema for more information on configuration, limits and pagination configuration:\n\n```bash\naxoned tx wasm instantiate $CODE_ID \\\n --label \"my-storage\" \\\n --from $ADDR \\\n --admin $ADMIN_ADDR \\\n --gas 1000000 \\\n '{\"bucket\":\"my-bucket\"}'\n```\n\n### Execution\n\nWe can store an object by providing its data in base64 encoded, we can pin the stored object to prevent it from being removed:\n\n```bash\naxoned tx wasm execute $CONTRACT_ADDR \\\n --from $ADDR \\\n --gas 1000000 \\\n \"{\\\"store_object\\\":{\\\"data\\\": \\\"$(cat my-data | base64)\\\",\\\"pin\\\":true}}\"\n```\n\nThe object id is stable as it is a hash, we can't store an object twice.\n\nWith the following commands we can pin and unpin existing objects:\n\n```bash\naxoned tx wasm execute $CONTRACT_ADDR \\\n --from $ADDR \\\n --gas 1000000 \\\n \"{\\\"pin_object\\\":{\\\"id\\\": \\\"$OBJECT_ID\\\"}}\"\n\naxoned tx wasm execute $CONTRACT_ADDR \\\n --from $ADDR \\\n --gas 1000000 \\\n \"{\\\"unpin_object\\\":{\\\"id\\\": \\\"$OBJECT_ID\\\"}}\"\n```\n\nAnd if an object is not pinned, or pinned by the sender of transaction, we can remove it:\n\n```bash\naxoned tx wasm execute $CONTRACT_ADDR \\\n --from $ADDR \\\n --gas 1000000 \\\n \"{\\\"forget_object\\\":{\\\"id\\\": \\\"$OBJECT_ID\\\"}}\"\n```\n\n### Querying\n\nQuery an object by its id:\n\n```bash\naxoned query wasm contract-state smart $CONTRACT_ADDR \\\n \"{\\\"object\\\": {\\\"id\\\": \\\"$OBJECT_ID\\\"}}\"\n```\n\nOr its data:\n\n```bash\naxoned query wasm contract-state smart $CONTRACT_ADDR \\\n \"{\\\"object_data\\\": {\\\"id\\\": \\\"$OBJECT_ID\\\"}}\"\n```\n\nWe can also list the objects, eventually filtering on the object owner:\n\n```bash\naxoned query wasm contract-state smart $CONTRACT_ADDR \\\n \"{\\\"objects\\\": {\\\"address\\\": \\\"axone1p8u47en82gmzfm259y6z93r9qe63l25d858vqu\\\"}}\"\n```\n\nAnd navigate in a cursor based pagination:\n\n```bash\naxoned query wasm contract-state smart $CONTRACT_ADDR \\\n \"{\\\"objects\\\": {\\\"first\\\": 5, \\\"after\\\": \\\"23Y5t5DBe7DkPwfJo3Sd26Y8Z9epmtpA1FTpdG7DiG6MD8vPRTzzbQ9TccmyoBcePkPK6atUiqcAzJVo3TfYNBGY\\\"}}\"\n```\n\nWe can also query object pins with the same cursor based pagination:\n\n```bash\naxoned query wasm contract-state smart $CONTRACT_ADDR \\\n \"{\\\"object_pins\\\": {\\\"id\\\": \\\"$OBJECT_ID\\\", \\\"first\\\": 5, \\\"after\\\": \\\"23Y5t5DBe7DkPwfJo3Sd26Y8Z9epmtpA1FTpdG7DiG6MD8vPRTzzbQ9TccmyoBcePkPK6atUiqcAzJVo3TfYNBGY\\\"}}\"\n```", + "title": "axone-objectarium" +} From 9db02b6542cf5ed50941bbb3e3f0207c00b87c4b Mon Sep 17 00:00:00 2001 From: srdtrk Date: Sat, 22 Jun 2024 02:00:49 +0400 Subject: [PATCH 4/4] fix: add back the dao-dao typo to fix in a seperate pr --- integration_test/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration_test/integration_test.go b/integration_test/integration_test.go index dfb719b..b8b3ff2 100644 --- 
a/integration_test/integration_test.go
+++ b/integration_test/integration_test.go
@@ -104,7 +104,7 @@ func (s *MySuite) TestQueryClient() {
 	s.GenerateQueryClientTest("testdata/cw721-base.json")
 	s.GenerateQueryClientTest("testdata/cw2981-royalties.json")
 	s.GenerateQueryClientTest("testdata/ics721.json")
-	s.GenerateQueryClientTest("testdata/dao-dao-core.json")
+	s.GenerateMessageTypesTest("testdata/dao-dao-core.json")
 	s.GenerateQueryClientTest("testdata/axone-objectarium.json")
 }
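
For reference, below is a minimal sketch of the struct shape the json-key fix in PATCH 2/4 is expected to yield for a titled oneOf variant such as "StoreObject" / "store_object" from the new axone-objectarium.json testdata: the Go field name comes from the schema title, the json tag from the property key, with ",omitempty" when the field type is a pointer. The type names ExecuteMsg and ExecuteMsg_StoreObject and the field types are illustrative assumptions, not the generator's literal output, which also depends on the typePrefix passed to generateFieldsFromOneOf.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ExecuteMsg_StoreObject is a hypothetical stand-in for the type the generator
// registers for the "store_object" property; the real name depends on typePrefix.
type ExecuteMsg_StoreObject struct {
	Data string `json:"data"`
	Pin  bool   `json:"pin"`
}

// ExecuteMsg sketches the field the patched generator should emit for the titled
// oneOf variant: PascalCase Go name from the title ("StoreObject"), snake_case
// json key from the property name ("store_object"), pointer type with omitempty.
type ExecuteMsg struct {
	StoreObject *ExecuteMsg_StoreObject `json:"store_object,omitempty"`
}

func main() {
	msg := ExecuteMsg{StoreObject: &ExecuteMsg_StoreObject{Data: "aGVsbG8=", Pin: true}}
	out, _ := json.Marshal(msg)
	fmt.Println(string(out)) // {"store_object":{"data":"aGVsbG8=","pin":true}}
}
```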