diff --git a/.changes/1.36.3.json b/.changes/1.36.3.json new file mode 100644 index 0000000000..94029600af --- /dev/null +++ b/.changes/1.36.3.json @@ -0,0 +1,42 @@ +[ + { + "category": "``batch``", + "description": "Documentation-only update: clarified the description of the shareDecaySeconds parameter of the FairsharePolicy data type, clarified the description of the priority parameter of the JobQueueDetail data type.", + "type": "api-change" + }, + { + "category": "``cognito-idp``", + "description": "corrects the dual-stack endpoint configuration for cognitoidp", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Added DeleteContactFlowVersion API and the CAMPAIGN flow type", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "Increasing entryPoint in SparkSubmit to accept longer script paths. New limit is 4kb.", + "type": "api-change" + }, + { + "category": "``iotsitewise``", + "description": "AWS IoT SiteWise now supports ingestion and querying of Null (all data types) and NaN (double type) values of bad or uncertain data quality. New partial error handling prevents data loss during ingestion. Enabled by default for new customers; existing customers can opt-in.", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "Documentation-only update to address doc errors", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "Added `DigitGroupingStyle` in ThousandsSeparator to allow grouping by `LAKH`( Indian Grouping system ) currency. Support LAKH and `CRORE` currency types in Column Formatting.", + "type": "api-change" + }, + { + "category": "``sns``", + "description": "This release adds support for the topic attribute FifoThroughputScope for SNS FIFO topics. For details, see the documentation history in the Amazon Simple Notification Service Developer Guide.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 3c5061a1b4..4893c1ceee 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -23,13 +23,13 @@ jobs: uses: "actions/checkout@d632683dd7b4114ad314bca15554477dd762a938" - name: "Run CodeQL init" - uses: "github/codeql-action/init@662472033e021d55d94146f66f6058822b0b39fd" + uses: "github/codeql-action/init@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c" with: config-file: "./.github/codeql.yml" languages: "python" - name: "Run CodeQL autobuild" - uses: "github/codeql-action/autobuild@662472033e021d55d94146f66f6058822b0b39fd" + uses: "github/codeql-action/autobuild@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c" - name: "Run CodeQL analyze" - uses: "github/codeql-action/analyze@662472033e021d55d94146f66f6058822b0b39fd" + uses: "github/codeql-action/analyze@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c" diff --git a/.github/workflows/stale_issue.yml b/.github/workflows/stale_issue.yml index 7789fe4889..723bfcdb0f 100644 --- a/.github/workflows/stale_issue.yml +++ b/.github/workflows/stale_issue.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest name: Stale issue job steps: - - uses: aws-actions/stale-issue-cleanup@413d85a1603df4f0a8158f5f3c8204ab1691313e + - uses: aws-actions/stale-issue-cleanup@c452909aaa3fd1be240576cb41c6dd7bcb95cc10 with: issue-types: issues stale-issue-message: Greetings! 
It looks like this issue hasn’t been diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f5027eccc0..b37fe4109f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,19 @@ CHANGELOG ========= +1.36.3 +====== + +* api-change:``batch``: Documentation-only update: clarified the description of the shareDecaySeconds parameter of the FairsharePolicy data type, clarified the description of the priority parameter of the JobQueueDetail data type. +* api-change:``cognito-idp``: corrects the dual-stack endpoint configuration for cognitoidp +* api-change:``connect``: Added DeleteContactFlowVersion API and the CAMPAIGN flow type +* api-change:``emr-serverless``: Increasing entryPoint in SparkSubmit to accept longer script paths. New limit is 4kb. +* api-change:``iotsitewise``: AWS IoT SiteWise now supports ingestion and querying of Null (all data types) and NaN (double type) values of bad or uncertain data quality. New partial error handling prevents data loss during ingestion. Enabled by default for new customers; existing customers can opt-in. +* api-change:``logs``: Documentation-only update to address doc errors +* api-change:``quicksight``: Added `DigitGroupingStyle` in ThousandsSeparator to allow grouping by `LAKH`( Indian Grouping system ) currency. Support LAKH and `CRORE` currency types in Column Formatting. +* api-change:``sns``: This release adds support for the topic attribute FifoThroughputScope for SNS FIFO topics. For details, see the documentation history in the Amazon Simple Notification Service Developer Guide. + + 1.36.2 ====== diff --git a/botocore/__init__.py b/botocore/__init__.py index 9a335803d0..18e80ef6cb 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.36.2' +__version__ = '1.36.3' class NullHandler(logging.Handler): diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index 2f2550a1f2..5cc1f6cb65 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -2366,7 +2366,7 @@ "members":{ "shareDecaySeconds":{ "shape":"Integer", - "documentation":"

The amount of time (in seconds) to use to calculate a fair share percentage for each fair share identifier in use. A value of zero (0) indicates that only current usage is measured. The decay allows for more recently run jobs to have more weight than jobs that ran earlier. The maximum supported value is 604800 (1 week).

" + "documentation":"

The amount of time (in seconds) to use to calculate a fair share percentage for each fair share identifier in use. A value of zero (0) indicates the default minimum time window (600 seconds). The maximum supported value is 604800 (1 week).

The decay allows for more recently run jobs to have more weight than jobs that ran earlier. Consider adjusting this number if you have jobs that (on average) run longer than ten minutes, or if there is a large difference in job count or job run times between share identifiers, and the allocation of resources doesn't meet your needs.
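A minimal boto3 sketch of where shareDecaySeconds is supplied — through the fairsharePolicy argument of CreateSchedulingPolicy — assuming a botocore at or above this version; the policy name, share identifiers, and weights are hypothetical:

import boto3

batch = boto3.client("batch")

# Decay usage over the past hour into the fair share calculation instead of
# measuring only current usage (shareDecaySeconds=0). Max is 604800 (1 week).
batch.create_scheduling_policy(
    name="example-fairshare-policy",  # hypothetical
    fairsharePolicy={
        "shareDecaySeconds": 3600,
        "computeReservation": 50,
        "shareDistribution": [
            {"shareIdentifier": "teamA", "weightFactor": 1.0},  # hypothetical
            {"shareIdentifier": "teamB", "weightFactor": 0.5},
        ],
    },
)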

" }, "computeReservation":{ "shape":"Integer", @@ -2779,7 +2779,7 @@ }, "priority":{ "shape":"Integer", - "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either Amazon EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). Amazon EC2 and Fargate compute environments can't be mixed.

" + "documentation":"

The priority of the job queue. Job queue priority determines the order that job queues are evaluated when multiple queues dispatch jobs within a shared compute environment. A higher value for priority indicates a higher priority. Queues are evaluated in cycles, in descending order by priority. For example, a job queue with a priority value of 10 is evaluated before a queue with a priority value of 1. All of the compute environments must be either Amazon EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). Amazon EC2 and Fargate compute environments can't be mixed.

Job queue priority doesn't guarantee that a particular job executes before a job in a lower priority queue. Jobs added to higher priority queues during the queue evaluation cycle might not be evaluated until the next cycle. A job is dispatched from a queue only if resources are available when the queue is evaluated. If there are insufficient resources available at that time, the cycle proceeds to the next queue. This means that jobs added to higher priority queues might have to wait for jobs in multiple lower priority queues to complete before they are dispatched. You can use job dependencies to control the order for jobs from queues with different priorities. For more information, see Job Dependencies in the Batch User Guide.

" }, "computeEnvironmentOrder":{ "shape":"ComputeEnvironmentOrders", @@ -2996,7 +2996,7 @@ }, "targetInstanceTypes":{ "shape":"StringList", - "documentation":"

The instance type or family that this this override launch template should be applied to.

This parameter is required when defining a launch template override.

Information included in this parameter must meet the following requirements:

" + "documentation":"

The instance type or family that this override launch template should be applied to.

This parameter is required when defining a launch template override.

Information included in this parameter must meet the following requirements:

" } }, "documentation":"

An object that represents a launch template to use in place of the default launch template. You must specify either the launch template ID or launch template name in the request, but not both.

If security groups are specified using both the securityGroupIds parameter of CreateComputeEnvironment and the launch template, the values in the securityGroupIds parameter of CreateComputeEnvironment will be used.

You can define up to ten (10) overrides for each compute environment.

This object isn't applicable to jobs that are running on Fargate resources.

To unset all override templates for a compute environment, you can pass an empty array to the UpdateComputeEnvironment.overrides parameter, or not include the overrides parameter when submitting the UpdateComputeEnvironment API operation.

" @@ -3967,7 +3967,7 @@ }, "environment":{ "shape":"EnvironmentVariables", - "documentation":"

The environment variables to pass to a container. This parameter maps to Env inthe Create a container section of the Docker Remote API and the --env parameter to docker run.

We don't recommend using plaintext environment variables for sensitive information, such as credential data.

Environment variables cannot start with AWS_BATCH. This naming convention is reserved for variables that Batch sets.

" + "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env parameter to docker run.

We don't recommend using plaintext environment variables for sensitive information, such as credential data.

Environment variables cannot start with AWS_BATCH. This naming convention is reserved for variables that Batch sets.

" }, "essential":{ "shape":"Boolean", diff --git a/botocore/data/cognito-idp/2016-04-18/endpoint-rule-set-1.json b/botocore/data/cognito-idp/2016-04-18/endpoint-rule-set-1.json index 0b7d7c8290..5e76d91815 100644 --- a/botocore/data/cognito-idp/2016-04-18/endpoint-rule-set-1.json +++ b/botocore/data/cognito-idp/2016-04-18/endpoint-rule-set-1.json @@ -166,6 +166,82 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-east-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-west-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -270,6 +346,31 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://cognito-idp.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index 1befa2930d..7bb6e8b8ae 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -275,7 +275,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

>Associates a set of proficiencies with a user.

" + "documentation":"

Associates a set of proficiencies with a user.

" }, "BatchAssociateAnalyticsDataSet":{ "name":"BatchAssociateAnalyticsDataSet", @@ -499,7 +499,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If a version of the same flow content already exists, no new version is created and instead the existing version number is returned. If the FlowContentSha256 provided is different from the FlowContentSha256 of the $LATEST published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign.

" + "documentation":"

Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If the FlowContentSha256 provided is different from the FlowContentSha256 of the $LATEST published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign.

" }, "CreateEmailAddress":{ "name":"CreateEmailAddress", @@ -1040,6 +1040,24 @@ ], "documentation":"

Deletes the specified flow module.

" }, + "DeleteContactFlowVersion":{ + "name":"DeleteContactFlowVersion", + "http":{ + "method":"DELETE", + "requestUri":"/contact-flows/{InstanceId}/{ContactFlowId}/version/{ContactFlowVersion}" + }, + "input":{"shape":"DeleteContactFlowVersionRequest"}, + "output":{"shape":"DeleteContactFlowVersionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes the flow version specified by the flow version identifier.
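Once this model ships, the operation surfaces in boto3 as delete_contact_flow_version. A hedged sketch with hypothetical identifiers:

import boto3

connect = boto3.client("connect")

# Deletes version 2 of the flow; the instance ID and flow ARN are hypothetical.
connect.delete_contact_flow_version(
    InstanceId="aaaaaaaa-bbbb-cccc-dddd-111111111111",
    ContactFlowId=(
        "arn:aws:connect:us-west-2:123456789012:instance/"
        "aaaaaaaa-bbbb-cccc-dddd-111111111111/contact-flow/"
        "ffffffff-1111-2222-3333-444444444444"
    ),
    ContactFlowVersion=2,
)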

" + }, "DeleteEmailAddress":{ "name":"DeleteEmailAddress", "http":{ @@ -1203,7 +1221,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website.

" + "documentation":"

Deletes a queue.

" }, "DeleteQuickConnect":{ "name":"DeleteQuickConnect", @@ -1493,7 +1511,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Describes the specified flow.

You can also create and update flows using the Amazon Connect Flow language.

Use the $SAVED alias in the request to describe the SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. After a flow is published, $SAVED needs to be supplied to view saved content that has not been published.

In the response, Status indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content. SAVED does not initiate validation of the content. SAVED | PUBLISHED

" + "documentation":"

Describes the specified flow.

You can also create and update flows using the Amazon Connect Flow language.

Use the $SAVED alias in the request to describe the SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. After a flow is published, $SAVED needs to be supplied to view saved content that has not been published.

Use arn:aws:.../contact-flow/{id}:{version} to retrieve the content of a specific flow version.

In the response, Status indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content. SAVED does not initiate validation of the content. SAVED | PUBLISHED
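A hedged boto3 sketch of the version-qualified lookup described above (the instance and flow identifiers are hypothetical):

import boto3

connect = boto3.client("connect")

# The ":2" suffix retrieves the content of flow version 2; use ":$SAVED"
# instead to retrieve saved-but-unpublished content.
resp = connect.describe_contact_flow(
    InstanceId="aaaaaaaa-bbbb-cccc-dddd-111111111111",
    ContactFlowId=(
        "arn:aws:connect:us-west-2:123456789012:instance/"
        "aaaaaaaa-bbbb-cccc-dddd-111111111111/contact-flow/"
        "ffffffff-1111-2222-3333-444444444444:2"
    ),
)
print(resp["ContactFlow"]["Status"])  # SAVED or PUBLISHED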

" }, "DescribeContactFlowModule":{ "name":"DescribeContactFlowModule", @@ -7381,7 +7399,8 @@ "AGENT_WHISPER", "OUTBOUND_WHISPER", "AGENT_TRANSFER", - "QUEUE_TRANSFER" + "QUEUE_TRANSFER", + "CAMPAIGN" ] }, "ContactFlowTypes":{ @@ -7832,7 +7851,7 @@ }, "FlowContentSha256":{ "shape":"FlowContentSha256", - "documentation":"

Indicates the checksum value of the flow content.

" + "documentation":"

Indicates the checksum value of the latest published flow content.

" } } }, @@ -7863,6 +7882,10 @@ "shape":"FlowContentSha256", "documentation":"

Indicates the checksum value of the flow content.

" }, + "ContactFlowVersion":{ + "shape":"ResourceVersion", + "documentation":"

The identifier of the flow version.

" + }, "LastModifiedTime":{ "shape":"Timestamp", "documentation":"

The time when this resource was last modified.

" @@ -9617,6 +9640,39 @@ "members":{ } }, + "DeleteContactFlowVersionRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ContactFlowId", + "ContactFlowVersion" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "ContactFlowId":{ + "shape":"ARN", + "documentation":"

The identifier of the flow.

", + "location":"uri", + "locationName":"ContactFlowId" + }, + "ContactFlowVersion":{ + "shape":"ResourceVersion", + "documentation":"

The identifier of the flow version.

", + "location":"uri", + "locationName":"ContactFlowVersion" + } + } + }, + "DeleteContactFlowVersionResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteEmailAddressRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/emr-serverless/2021-07-13/service-2.json b/botocore/data/emr-serverless/2021-07-13/service-2.json index 86100c0cb3..d32f0d655d 100644 --- a/botocore/data/emr-serverless/2021-07-13/service-2.json +++ b/botocore/data/emr-serverless/2021-07-13/service-2.json @@ -822,7 +822,7 @@ }, "EntryPointPath":{ "type":"string", - "max":256, + "max":4096, "min":1, "pattern":".*\\S.*", "sensitive":true diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 1bdb751b5b..bae1e8fe16 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -8757,6 +8757,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.me-south-1.amazonaws.com" }, + "fips-mx-central-1" : { + "credentialScope" : { + "region" : "mx-central-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.mx-central-1.amazonaws.com" + }, "fips-sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -8810,6 +8817,12 @@ "tags" : [ "fips" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.mx-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.sa-east-1.amazonaws.com", diff --git a/botocore/data/iotsitewise/2019-12-02/service-2.json b/botocore/data/iotsitewise/2019-12-02/service-2.json index 721158573e..2d97cd3c17 100644 --- a/botocore/data/iotsitewise/2019-12-02/service-2.json +++ b/botocore/data/iotsitewise/2019-12-02/service-2.json @@ -3583,6 +3583,10 @@ "type":"structure", "required":["entries"], "members":{ + "enablePartialEntryProcessing":{ + "shape":"BooleanValue", + "documentation":"

This setting enables partial ingestion at the entry level. If set to true, all TQVs (timestamp-quality-value entries) that don't result in an error are ingested. If set to false, an invalid TQV fails ingestion of the entire entry that contains it.
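A hedged boto3 sketch combining this flag with the new nullValue variant added elsewhere in this diff (the asset and property IDs are hypothetical):

import time

import boto3

sitewise = boto3.client("iotsitewise")
now = int(time.time())

# With enablePartialEntryProcessing=True, an invalid TQV is reported as an
# error without failing the rest of its entry. The nullValue variant
# ("D" = double, per the RawValueType enum) records a null reading of BAD quality.
sitewise.batch_put_asset_property_value(
    enablePartialEntryProcessing=True,
    entries=[
        {
            "entryId": "entry-1",
            "assetId": "a1b2c3d4-5678-90ab-cdef-11111EXAMPLE",     # hypothetical
            "propertyId": "a1b2c3d4-5678-90ab-cdef-22222EXAMPLE",  # hypothetical
            "propertyValues": [
                {
                    "value": {"doubleValue": 22.5},
                    "timestamp": {"timeInSeconds": now},
                    "quality": "GOOD",
                },
                {
                    "value": {"nullValue": {"valueType": "D"}},
                    "timestamp": {"timeInSeconds": now + 1},
                    "quality": "BAD",
                },
            ],
        }
    ],
)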

" + }, "entries":{ "shape":"PutAssetPropertyValueEntries", "documentation":"

The list of asset property value entries for the batch put request. You can specify up to 10 entries per request.

" @@ -3599,6 +3603,7 @@ } } }, + "BooleanValue":{"type":"boolean"}, "Bucket":{ "type":"string", "max":63, @@ -6023,6 +6028,10 @@ "warmTierRetentionPeriod":{ "shape":"WarmTierRetentionPeriod", "documentation":"

Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.

" + }, + "disallowIngestNullNaN":{ + "shape":"DisallowIngestNullNaN", + "documentation":"

Describes the configuration for ingesting NULL and NaN data. By default, the feature is allowed. The feature is disallowed if this value is true.

" } } }, @@ -6133,6 +6142,7 @@ "type":"list", "member":{"shape":"DetailedError"} }, + "DisallowIngestNullNaN":{"type":"boolean"}, "DisassociateAssetsRequest":{ "type":"structure", "required":[ @@ -8206,10 +8216,7 @@ "type":"boolean", "box":true }, - "NumberOfDays":{ - "type":"integer", - "min":30 - }, + "NumberOfDays":{"type":"integer"}, "Offset":{ "type":"string", "max":25, @@ -8543,6 +8550,17 @@ "PropertyValueBooleanValue":{"type":"boolean"}, "PropertyValueDoubleValue":{"type":"double"}, "PropertyValueIntegerValue":{"type":"integer"}, + "PropertyValueNullValue":{ + "type":"structure", + "required":["valueType"], + "members":{ + "valueType":{ + "shape":"RawValueType", + "documentation":"

The type of null asset property data.

" + } + }, + "documentation":"

The value type of null asset property data with BAD and UNCERTAIN qualities.

" + }, "PropertyValueStringValue":{"type":"string"}, "PutAssetPropertyValueEntries":{ "type":"list", @@ -8652,6 +8670,10 @@ "warmTierRetentionPeriod":{ "shape":"WarmTierRetentionPeriod", "documentation":"

Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.

" + }, + "disallowIngestNullNaN":{ + "shape":"DisallowIngestNullNaN", + "documentation":"

Describes the configuration for ingesting NULL and NaN data. By default, the feature is allowed. The feature is disallowed if this value is true.

" } } }, @@ -8683,6 +8705,10 @@ "warmTierRetentionPeriod":{ "shape":"WarmTierRetentionPeriod", "documentation":"

Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.

" + }, + "disallowIngestNullNaN":{ + "shape":"DisallowIngestNullNaN", + "documentation":"

Describes the configuration for ingesting NULL and NaN data. By default, the feature is allowed. The feature is disallowed if this value is true.

" } } }, @@ -8714,6 +8740,16 @@ "error":{"httpStatusCode":400}, "exception":true }, + "RawValueType":{ + "type":"string", + "enum":[ + "D", + "B", + "S", + "I", + "U" + ] + }, "Reference":{ "type":"structure", "members":{ @@ -9773,7 +9809,7 @@ "members":{ "stringValue":{ "shape":"PropertyValueStringValue", - "documentation":"

Asset property data of type string (sequence of characters).

" + "documentation":"

Asset property data of type string (sequence of characters). The allowed pattern: \"^$|[^\\u0000-\\u001F\\u007F]+\". The max length is 1024.

" }, "integerValue":{ "shape":"PropertyValueIntegerValue", @@ -9781,11 +9817,15 @@ }, "doubleValue":{ "shape":"PropertyValueDoubleValue", - "documentation":"

Asset property data of type double (floating point number).

" + "documentation":"

Asset property data of type double (floating point number). The min value is -10^10. The max value is 10^10. Double.NaN is allowed.

" }, "booleanValue":{ "shape":"PropertyValueBooleanValue", "documentation":"

Asset property data of type Boolean (true or false).

" + }, + "nullValue":{ + "shape":"PropertyValueNullValue", + "documentation":"

The type of null asset property data with BAD and UNCERTAIN qualities.

" } }, "documentation":"

Contains an asset property value (of a single type only).

" diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index b31f879823..0c2b284698 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -27,7 +27,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.

When you use AssociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation.

If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.

CloudWatch Logs supports only symmetric KMS keys. Do not use an associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys.

It can take up to 5 minutes for this operation to take effect.

If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error.

" + "documentation":"

Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.

When you use AssociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation.

If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.

CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys.

It can take up to 5 minutes for this operation to take effect.

If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error.
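A hedged boto3 sketch (the log group name and key ARN are hypothetical; pass either logGroupName or resourceIdentifier, never both):

import boto3

logs = boto3.client("logs")

# The key must be a symmetric KMS key; expect InvalidParameterException if it
# is missing or disabled, and allow up to 5 minutes for the change to apply.
logs.associate_kms_key(
    logGroupName="my-log-group",  # hypothetical
    kmsKeyId="arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-123456EXAMPLE",
)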

" }, "CancelExportTask":{ "name":"CancelExportTask", @@ -79,7 +79,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.

Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.

Exporting to S3 buckets that are encrypted with AES-256 is supported.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.

" + "documentation":"

Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.

Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.

Exporting to S3 buckets that are encrypted with AES-256 is supported.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

We recommend that you don't regularly export to Amazon S3 as a way to continuously archive your logs. For that use case, we instead recommend that you use subscriptions. For more information about subscriptions, see Real-time processing of log data with subscriptions.

Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
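A hedged boto3 sketch of a one-off export (the log group, bucket, and prefix are hypothetical; fromTime and to are epoch milliseconds):

import time

import boto3

logs = boto3.client("logs")
now_ms = int(time.time() * 1000)

# Export the last 24 hours; only one RUNNING/PENDING task per account at a time.
resp = logs.create_export_task(
    taskName="daily-export",         # hypothetical
    logGroupName="my-log-group",     # hypothetical
    fromTime=now_ms - 24 * 60 * 60 * 1000,
    to=now_ms,
    destination="my-export-bucket",  # hypothetical; caller must be able to write to it
    destinationPrefix="exports/my-log-group",
)
print(resp["taskId"])  # poll describe_export_tasks for status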

" }, "CreateLogAnomalyDetector":{ "name":"CreateLogAnomalyDetector", @@ -174,7 +174,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes s delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source.

" + "documentation":"

Deletes a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source.

" }, "DeleteDeliveryDestination":{ "name":"DeleteDeliveryDestination", @@ -422,7 +422,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns a list of all CloudWatch Logs account policies in the account.

" + "documentation":"

Returns a list of all CloudWatch Logs account policies in the account.

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are retrieving information for.

" }, "DescribeConfigurationTemplates":{ "name":"DescribeConfigurationTemplates", @@ -577,7 +577,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.

You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both.

This operation has a limit of five transactions per second, after which transactions are throttled.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

" + "documentation":"

Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.

You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both.

This operation has a limit of 25 transactions per second, after which transactions are throttled.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

" }, "DescribeMetricFilters":{ "name":"DescribeMetricFilters", @@ -980,7 +980,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account.

Data protection policy

A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.

Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.

The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

Subscription filter policy

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

Transformer policy

Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.

You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.

A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.

Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.

You can create transformers only for the log groups in the Standard log class.

You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging.

You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.

Field index policy

You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs

To find the fields that are in your log group events, use the GetLogGroupFields operation.

For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId in [value, value, ...] will attempt to process only the log events where the indexed field matches the specified value.

Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId won't match a log event containing requestId.

You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging.

If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.

If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.

" + "documentation":"

Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account.

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are creating.

Data protection policy

A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.

Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.

The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

Subscription filter policy

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

Transformer policy

Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.

You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.

A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.

Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.

You can create transformers only for the log groups in the Standard log class.

You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another transformer policy filtered to my-logpprod or my-logging.

You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.

Field index policy

You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs

To find the fields that are in your log group events, use the GetLogGroupFields operation.

For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId in [value, value, ...] will attempt to process only the log events where the indexed field matches the specified value.

Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId won't match a log event containing requestId.

You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging.

If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.

If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.
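As a hedged sketch, an account-level field index policy might be created as follows (the policy name, indexed field, and selection prefix are hypothetical, and the {"Fields": [...]} policyDocument shape is an assumption based on the field index documentation above):

import json

import boto3

logs = boto3.client("logs")

# Index requestId across all log groups whose names start with "my-log".
logs.put_account_policy(
    policyName="my-field-index-policy",  # hypothetical
    policyType="FIELD_INDEX_POLICY",
    policyDocument=json.dumps({"Fields": ["requestId"]}),  # assumed document shape
    scope="ALL",
    selectionCriteria='LogGroupNamePrefix match "my-log"',
)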

" }, "PutDataProtectionPolicy":{ "name":"PutDataProtectionPolicy", @@ -1015,7 +1015,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.

" + "documentation":"

Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.

" }, "PutDeliveryDestinationPolicy":{ "name":"PutDeliveryDestinationPolicy", @@ -1146,7 +1146,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidOperationException"} ], - "documentation":"

Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.

The maximum number of metric filters that can be associated with a log group is 100.

Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.

Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.

CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.

You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.

" + "documentation":"

Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.

The maximum number of metric filters that can be associated with a log group is 100.

Using regular expressions in filter patterns is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.

Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.

CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.

You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
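A hedged boto3 sketch of a metric filter with a unit and a low-cardinality dimension (all names are hypothetical):

import boto3

logs = boto3.client("logs")

# Count ERROR-level JSON log events. The dimension pulls from a log field with
# few distinct values; avoid high-cardinality fields such as requestID.
logs.put_metric_filter(
    logGroupName="my-log-group",  # hypothetical
    filterName="error-count",
    filterPattern='{ $.level = "ERROR" }',
    metricTransformations=[
        {
            "metricName": "ErrorCount",
            "metricNamespace": "MyApp",  # hypothetical
            "metricValue": "1",
            "unit": "Count",
            "dimensions": {"Service": "$.service"},
        }
    ],
)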

" }, "PutQueryDefinition":{ "name":"PutQueryDefinition", @@ -1209,7 +1209,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidOperationException"} ], - "documentation":"

Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName.

Using regular expressions to create subscription filters is supported. For these filters, there is a quotas of quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in subscription filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

" + "documentation":"

Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName.

Using regular expressions in filter patterns is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
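A hedged boto3 sketch for a Kinesis destination (the stream and role ARNs are hypothetical; roleArn and iam:PassRole are not needed for Lambda destinations):

import boto3

logs = boto3.client("logs")

# An empty filterPattern matches every log event; events arrive Base64 encoded
# and GZIP compressed at the destination.
logs.put_subscription_filter(
    logGroupName="my-log-group",  # hypothetical
    filterName="to-kinesis",
    filterPattern="",
    destinationArn="arn:aws:kinesis:us-east-1:123456789012:stream/my-stream",
    roleArn="arn:aws:iam::123456789012:role/CWLtoKinesisRole",
)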

" }, "PutTransformer":{ "name":"PutTransformer", @@ -4959,11 +4959,11 @@ }, "dataSourceRoleArn":{ "shape":"Arn", - "documentation":"

Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. For more information about the permissions needed, see Create an IAM role to access the OpenSearch Service collection in the CloudWatch Logs User Guide.

" + "documentation":"

Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. For more information about the permissions needed, see Permissions that the integration needs in the CloudWatch Logs User Guide.

" }, "dashboardViewerPrincipals":{ "shape":"DashboardViewerPrincipals", - "documentation":"

Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.

In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardsAccess IAM policy. For more information, see

" + "documentation":"

Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.

In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardAccess IAM policy. For more information, see IAM policies for users.
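A rough sketch of how these two fields are passed to the PutIntegration operation, assuming the openSearchResourceConfig structure in this service model; the ARNs and retention value are placeholders:

    import boto3

    logs = boto3.client("logs")

    # dataSourceRoleArn must let CloudWatch Logs access the OpenSearch Service
    # collection; each viewer principal also needs the dashboard-access policy
    # attached, per the documentation above.
    logs.put_integration(
        integrationName="example-opensearch-integration",
        integrationType="OPENSEARCH",
        resourceConfig={
            "openSearchResourceConfig": {
                "dataSourceRoleArn": "arn:aws:iam::111122223333:role/ExampleDataSourceRole",
                "dashboardViewerPrincipals": [
                    "arn:aws:iam::111122223333:role/ExampleViewerRole"
                ],
                "retentionDays": 30,
            }
        },
    )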

" }, "applicationArn":{ "shape":"Arn", @@ -5500,7 +5500,7 @@ }, "logType":{ "shape":"LogType", - "documentation":"

Defines the type of log that the source is sending.

" + "documentation":"

Defines the type of log that the source is sending.

" }, "tags":{ "shape":"Tags", @@ -6222,7 +6222,7 @@ "members":{ "suffixPath":{ "shape":"DeliverySuffixPath", - "documentation":"

This string allows re-configuring the S3 object prefix to contain either static or variable sections. The valid variables to use in the suffix path will vary by each log source. See ConfigurationTemplate$allowedSuffixPathFields for more info on what values are supported in the suffix path for each log source.

" + "documentation":"

This string lets you reconfigure the S3 object prefix to contain either static or variable sections. The valid variables to use in the suffix path vary by log source. To find the values supported for the suffix path for each log source, use the DescribeConfigurationTemplates operation and check the allowedSuffixPathFields field in the response.
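As the revised text suggests, the supported suffix-path variables can be listed programmatically; a small sketch (the S3 destination filter is illustrative):

    import boto3

    logs = boto3.client("logs")

    # Print the suffix-path fields each log source supports for S3 deliveries.
    resp = logs.describe_configuration_templates(deliveryDestinationTypes=["S3"])
    for template in resp["configurationTemplates"]:
        print(
            template["service"],
            template["logType"],
            template.get("allowedSuffixPathFields"),
        )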

" }, "enableHiveCompatiblePath":{ "shape":"Boolean", @@ -6290,7 +6290,7 @@ "members":{ "message":{"shape":"Message"} }, - "documentation":"

his exception is returned if an unknown error occurs during a Live Tail session.

", + "documentation":"

This exception is returned if an unknown error occurs during a Live Tail session.

", "exception":true }, "SessionTimeoutException":{ diff --git a/botocore/data/quicksight/2018-04-01/service-2.json b/botocore/data/quicksight/2018-04-01/service-2.json index 2668fd01ea..fe9f13996b 100644 --- a/botocore/data/quicksight/2018-04-01/service-2.json +++ b/botocore/data/quicksight/2018-04-01/service-2.json @@ -16920,6 +16920,13 @@ }, "documentation":"

The configuration of destination parameter values.

This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

" }, + "DigitGroupingStyle":{ + "type":"string", + "enum":[ + "DEFAULT", + "LAKHS" + ] + }, "DimensionField":{ "type":"structure", "members":{ @@ -25217,7 +25224,9 @@ "THOUSANDS", "MILLIONS", "BILLIONS", - "TRILLIONS" + "TRILLIONS", + "LAKHS", + "CRORES" ] }, "NumericAxisOptions":{ @@ -31878,7 +31887,7 @@ "TableFieldOptionList":{ "type":"list", "member":{"shape":"TableFieldOption"}, - "max":100 + "max":201 }, "TableFieldOptions":{ "type":"structure", @@ -32089,11 +32098,16 @@ "SCROLLED" ] }, + "TableUnaggregatedFieldList":{ + "type":"list", + "member":{"shape":"UnaggregatedField"}, + "max":201 + }, "TableUnaggregatedFieldWells":{ "type":"structure", "members":{ "Values":{ - "shape":"UnaggregatedFieldList", + "shape":"TableUnaggregatedFieldList", "documentation":"

The values field well for a pivot table. Values are unaggregated for an unaggregated table.

" } }, @@ -32846,6 +32860,10 @@ "Visibility":{ "shape":"Visibility", "documentation":"

Determines the visibility of the thousands separator.

" + }, + "GroupingStyle":{ + "shape":"DigitGroupingStyle", + "documentation":"

Determines the way numbers are styled to accommodate different readability standards. The DEFAULT value uses the standard international grouping system and groups numbers by the thousands. The LAKHS value uses the Indian numbering system and groups numbers by lakhs and crores.

" } }, "documentation":"

The options that determine the thousands separator configuration.
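Put together, a thousands-separator configuration that selects the Indian grouping might look like the following fragment, shown as the Python dict a boto3 dashboard or analysis definition would accept (hypothetical values):

    # With LAKHS, 12345678 renders as 1,23,45,678 rather than 12,345,678.
    thousands_separator_options = {
        "Visibility": "VISIBLE",
        "GroupingStyle": "LAKHS",  # "DEFAULT" keeps standard thousands grouping
    }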

" diff --git a/botocore/data/sns/2010-03-31/service-2.json b/botocore/data/sns/2010-03-31/service-2.json index 8ff9f4cd24..70dd2549b9 100644 --- a/botocore/data/sns/2010-03-31/service-2.json +++ b/botocore/data/sns/2010-03-31/service-2.json @@ -1099,7 +1099,7 @@ }, "Attributes":{ "shape":"TopicAttributesMap", - "documentation":"

A map of attributes with their corresponding values.

The following lists names, descriptions, and values of the special request parameters that the CreateTopic action uses:

The following attribute applies only to server-side encryption:

The following attributes apply only to FIFO topics:

" + "documentation":"

A map of attributes with their corresponding values.

The following lists names, descriptions, and values of the special request parameters that the CreateTopic action uses:

The following attribute applies only to server-side encryption:

The following attributes apply only to FIFO topics:
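A sketch of creating a FIFO topic with the throughput-scope attribute this release concerns; the value "MessageGroup" is an assumption based on the scopes SNS documents elsewhere, and the topic name is a placeholder:

    import boto3

    sns = boto3.client("sns")

    # FifoThroughputScope is the FIFO topic attribute added in this release.
    sns.create_topic(
        Name="example-topic.fifo",
        Attributes={
            "FifoTopic": "true",
            "FifoThroughputScope": "MessageGroup",  # assumed value; "Topic" is the other scope
        },
    )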

" }, "Tags":{ "shape":"TagList", @@ -2022,7 +2022,7 @@ }, "MessageDeduplicationId":{ "shape":"String", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) topics.

The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SNS can't detect duplicate messages.

Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters.

MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

" + "documentation":"

This parameter applies only to FIFO (first-in-first-out) topics.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SNS can't detect duplicate messages.

Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted.
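For context, a minimal publish against a FIFO topic that supplies an explicit deduplication token; the ARN and IDs are placeholders:

    import boto3

    sns = boto3.client("sns")

    # A second publish with the same MessageDeduplicationId within the
    # 5-minute interval is accepted but not delivered again.
    sns.publish(
        TopicArn="arn:aws:sns:us-east-1:111122223333:example-topic.fifo",
        Message="order shipped",
        MessageGroupId="order-1234",
        MessageDeduplicationId="order-1234-shipped",
    )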

" }, "MessageGroupId":{ "shape":"String", @@ -2104,7 +2104,7 @@ }, "MessageDeduplicationId":{ "shape":"String", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any message sent with the same MessageDeduplicationId during the 5-minute deduplication interval is treated as a duplicate.

If the topic has ContentBasedDeduplication set, the system generates a MessageDeduplicationId based on the contents of the message. Your MessageDeduplicationId overrides the generated one.

" + "documentation":"" }, "MessageGroupId":{ "shape":"String", @@ -2311,7 +2311,7 @@ }, "AttributeName":{ "shape":"attributeName", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

The <ENDPOINT>SuccessFeedbackRoleArn and <ENDPOINT>FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The <ENDPOINT>SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the <ENDPOINT>FailureFeedbackRoleArn attribute, all failed message deliveries generate CloudWatch Logs.

The following attribute applies only to server-side-encryption:

The following attribute applies only to FIFO topics:

" + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

The <ENDPOINT>SuccessFeedbackRoleArn and <ENDPOINT>FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The <ENDPOINT>SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the <ENDPOINT>FailureFeedbackRoleArn attribute, all failed message deliveries generate CloudWatch Logs.

The following attribute applies only to server-side-encryption:

The following attribute applies only to FIFO topics:
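A short sketch of the call itself, here enabling delivery status logging for SQS endpoints following the <ENDPOINT> pattern described above; the role ARN and sample rate are placeholders:

    import boto3

    sns = boto3.client("sns")
    topic_arn = "arn:aws:sns:us-east-1:111122223333:example-topic"

    # Give SNS write access to CloudWatch Logs for SQS delivery status,
    # then sample 10% of successful deliveries.
    sns.set_topic_attributes(
        TopicArn=topic_arn,
        AttributeName="SQSSuccessFeedbackRoleArn",
        AttributeValue="arn:aws:iam::111122223333:role/SNSSuccessFeedback",
    )
    sns.set_topic_attributes(
        TopicArn=topic_arn,
        AttributeName="SQSSuccessFeedbackSampleRate",
        AttributeValue="10",
    )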

" }, "AttributeValue":{ "shape":"attributeValue", diff --git a/docs/source/conf.py b/docs/source/conf.py index 7b204f2db4..c000dd5a9d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.36' # The full version, including alpha/beta/rc tags. -release = '1.36.2' +release = '1.36.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/functional/endpoint-rules/cognito-idp/endpoint-tests-1.json b/tests/functional/endpoint-rules/cognito-idp/endpoint-tests-1.json index 8201879606..54232a5399 100644 --- a/tests/functional/endpoint-rules/cognito-idp/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/cognito-idp/endpoint-tests-1.json @@ -1,5 +1,31 @@ { "testCases": [ + { + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.af-south-1.amazonaws.com" + } + }, + "params": { + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ap-east-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -26,6 +52,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ap-northeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { @@ -39,6 +78,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ap-south-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-south-2", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -65,6 +117,32 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ap-southeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ap-southeast-4.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-4", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { @@ -78,6 +156,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ca-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ca-west-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { @@ -91,6 +182,19 @@ "UseDualStack": false } }, + { + "documentation": "For 
region eu-central-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.eu-central-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-central-2", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { @@ -104,6 +208,32 @@ "UseDualStack": false } }, + { + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.eu-south-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.eu-south-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-south-2", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { @@ -143,6 +273,32 @@ "UseDualStack": false } }, + { + "documentation": "For region il-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.il-central-1.amazonaws.com" + } + }, + "params": { + "Region": "il-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.me-central-1.amazonaws.com" + } + }, + "params": { + "Region": "me-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { @@ -195,6 +351,19 @@ "UseDualStack": false } }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { @@ -221,6 +390,19 @@ "UseDualStack": false } }, + { + "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp-fips.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { @@ -247,6 +429,19 @@ "UseDualStack": false } }, + { + "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp-fips.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { @@ -274,14 +469,14 @@ } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-idp-fips.us-east-1.api.aws" + "url": "https://cognito-idp-fips.us-west-2.amazonaws.com" } }, "params": { - "Region": "us-east-1", + "Region": "us-west-2", "UseFIPS": true, "UseDualStack": true } @@ -290,7 +485,7 @@ "documentation": "For region us-east-1 with FIPS disabled 
and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-idp.us-east-1.api.aws" + "url": "https://cognito-idp.us-east-1.amazonaws.com" } }, "params": {
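These expectations can be checked from client code: botocore's Config exposes the FIPS and dual-stack switches that drive the endpoint rules exercised above. A sketch; the region choice is arbitrary:

    import boto3
    from botocore.config import Config

    # With the corrected rules, FIPS + dual-stack for cognito-idp resolves to
    # cognito-idp-fips.<region>.amazonaws.com, matching the tests above.
    client = boto3.client(
        "cognito-idp",
        region_name="us-west-2",
        config=Config(use_fips_endpoint=True, use_dualstack_endpoint=True),
    )
    print(client.meta.endpoint_url)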