diff --git a/.changes/1.36.3.json b/.changes/1.36.3.json new file mode 100644 index 0000000000..94029600af --- /dev/null +++ b/.changes/1.36.3.json @@ -0,0 +1,42 @@ +[ + { + "category": "``batch``", + "description": "Documentation-only update: clarified the description of the shareDecaySeconds parameter of the FairsharePolicy data type, clarified the description of the priority parameter of the JobQueueDetail data type.", + "type": "api-change" + }, + { + "category": "``cognito-idp``", + "description": "corrects the dual-stack endpoint configuration for cognitoidp", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Added DeleteContactFlowVersion API and the CAMPAIGN flow type", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "Increasing entryPoint in SparkSubmit to accept longer script paths. New limit is 4kb.", + "type": "api-change" + }, + { + "category": "``iotsitewise``", + "description": "AWS IoT SiteWise now supports ingestion and querying of Null (all data types) and NaN (double type) values of bad or uncertain data quality. New partial error handling prevents data loss during ingestion. Enabled by default for new customers; existing customers can opt-in.", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "Documentation-only update to address doc errors", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "Added `DigitGroupingStyle` in ThousandsSeparator to allow grouping by `LAKH`( Indian Grouping system ) currency. Support LAKH and `CRORE` currency types in Column Formatting.", + "type": "api-change" + }, + { + "category": "``sns``", + "description": "This release adds support for the topic attribute FifoThroughputScope for SNS FIFO topics. For details, see the documentation history in the Amazon Simple Notification Service Developer Guide.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 3c5061a1b4..4893c1ceee 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -23,13 +23,13 @@ jobs: uses: "actions/checkout@d632683dd7b4114ad314bca15554477dd762a938" - name: "Run CodeQL init" - uses: "github/codeql-action/init@662472033e021d55d94146f66f6058822b0b39fd" + uses: "github/codeql-action/init@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c" with: config-file: "./.github/codeql.yml" languages: "python" - name: "Run CodeQL autobuild" - uses: "github/codeql-action/autobuild@662472033e021d55d94146f66f6058822b0b39fd" + uses: "github/codeql-action/autobuild@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c" - name: "Run CodeQL analyze" - uses: "github/codeql-action/analyze@662472033e021d55d94146f66f6058822b0b39fd" + uses: "github/codeql-action/analyze@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c" diff --git a/.github/workflows/stale_issue.yml b/.github/workflows/stale_issue.yml index 7789fe4889..723bfcdb0f 100644 --- a/.github/workflows/stale_issue.yml +++ b/.github/workflows/stale_issue.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest name: Stale issue job steps: - - uses: aws-actions/stale-issue-cleanup@413d85a1603df4f0a8158f5f3c8204ab1691313e + - uses: aws-actions/stale-issue-cleanup@c452909aaa3fd1be240576cb41c6dd7bcb95cc10 with: issue-types: issues stale-issue-message: Greetings! 
It looks like this issue hasn’t been diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f5027eccc0..b37fe4109f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,19 @@ CHANGELOG ========= +1.36.3 +====== + +* api-change:``batch``: Documentation-only update: clarified the description of the shareDecaySeconds parameter of the FairsharePolicy data type, clarified the description of the priority parameter of the JobQueueDetail data type. +* api-change:``cognito-idp``: corrects the dual-stack endpoint configuration for cognitoidp +* api-change:``connect``: Added DeleteContactFlowVersion API and the CAMPAIGN flow type +* api-change:``emr-serverless``: Increasing entryPoint in SparkSubmit to accept longer script paths. New limit is 4kb. +* api-change:``iotsitewise``: AWS IoT SiteWise now supports ingestion and querying of Null (all data types) and NaN (double type) values of bad or uncertain data quality. New partial error handling prevents data loss during ingestion. Enabled by default for new customers; existing customers can opt-in. +* api-change:``logs``: Documentation-only update to address doc errors +* api-change:``quicksight``: Added `DigitGroupingStyle` in ThousandsSeparator to allow grouping by `LAKH`( Indian Grouping system ) currency. Support LAKH and `CRORE` currency types in Column Formatting. +* api-change:``sns``: This release adds support for the topic attribute FifoThroughputScope for SNS FIFO topics. For details, see the documentation history in the Amazon Simple Notification Service Developer Guide. + + 1.36.2 ====== diff --git a/botocore/__init__.py b/botocore/__init__.py index 9a335803d0..18e80ef6cb 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.36.2' +__version__ = '1.36.3' class NullHandler(logging.Handler): diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index 2f2550a1f2..5cc1f6cb65 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -2366,7 +2366,7 @@ "members":{ "shareDecaySeconds":{ "shape":"Integer", - "documentation":"
The amount of time (in seconds) to use to calculate a fair share percentage for each fair share identifier in use. A value of zero (0) indicates that only current usage is measured. The decay allows for more recently run jobs to have more weight than jobs that ran earlier. The maximum supported value is 604800 (1 week).
" + "documentation":"The amount of time (in seconds) to use to calculate a fair share percentage for each fair share identifier in use. A value of zero (0) indicates the default minimum time window (600 seconds). The maximum supported value is 604800 (1 week).
The decay allows for more recently run jobs to have more weight than jobs that ran earlier. Consider adjusting this number if you have jobs that (on average) run longer than ten minutes, or a large difference in job count or job run times between share identifiers, and the allocation of resources doesn’t meet your needs.
" }, "computeReservation":{ "shape":"Integer", @@ -2779,7 +2779,7 @@ }, "priority":{ "shape":"Integer", - "documentation":"The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority
parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10
is given scheduling preference over a job queue with a priority value of 1
. All of the compute environments must be either Amazon EC2 (EC2
or SPOT
) or Fargate (FARGATE
or FARGATE_SPOT
). Amazon EC2 and Fargate compute environments can't be mixed.
The priority of the job queue. Job queue priority determines the order that job queues are evaluated when multiple queues dispatch jobs within a shared compute environment. A higher value for priority
indicates a higher priority. Queues are evaluated in cycles, in descending order by priority. For example, a job queue with a priority value of 10
is evaluated before a queue with a priority value of 1
. All of the compute environments must be either Amazon EC2 (EC2
or SPOT
) or Fargate (FARGATE
or FARGATE_SPOT
). Amazon EC2 and Fargate compute environments can't be mixed.
Job queue priority doesn't guarantee that a particular job executes before a job in a lower priority queue. Jobs added to higher priority queues during the queue evaluation cycle might not be evaluated until the next cycle. A job is dispatched from a queue only if resources are available when the queue is evaluated. If there are insufficient resources available at that time, the cycle proceeds to the next queue. This means that jobs added to higher priority queues might have to wait for jobs in multiple lower priority queues to complete before they are dispatched. You can use job dependencies to control the order for jobs from queues with different priorities. For more information, see Job Dependencies in the Batch User Guide.
The instance type or family that this this override launch template should be applied to.
This parameter is required when defining a launch template override.
Information included in this parameter must meet the following requirements:
Must be a valid Amazon EC2 instance type or family.
optimal
isn't allowed.
targetInstanceTypes
can target only instance types and families that are included within the ComputeResource.instanceTypes
set. targetInstanceTypes
doesn't need to include all of the instances from the instanceType
set, but at least a subset. For example, if ComputeResource.instanceTypes
includes [m5, g5]
, targetInstanceTypes
can include [m5.2xlarge]
and [m5.large]
but not [c5.large]
.
targetInstanceTypes
included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another define an instance type within this same family.
The instance type or family that this override launch template should be applied to.
This parameter is required when defining a launch template override.
Information included in this parameter must meet the following requirements:
Must be a valid Amazon EC2 instance type or family.
optimal
isn't allowed.
targetInstanceTypes
can target only instance types and families that are included within the ComputeResource.instanceTypes
set. targetInstanceTypes
doesn't need to include all of the instances from the instanceType
set, but at least a subset. For example, if ComputeResource.instanceTypes
includes [m5, g5]
, targetInstanceTypes
can include [m5.2xlarge]
and [m5.large]
but not [c5.large]
.
targetInstanceTypes
included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another define an instance type within this same family.
An object that represents a launch template to use in place of the default launch template. You must specify either the launch template ID or launch template name in the request, but not both.
If security groups are specified using both the securityGroupIds
parameter of CreateComputeEnvironment
and the launch template, the values in the securityGroupIds
parameter of CreateComputeEnvironment
will be used.
You can define up to ten (10) overrides for each compute environment.
This object isn't applicable to jobs that are running on Fargate resources.
To unset all override templates for a compute environment, you can pass an empty array to the UpdateComputeEnvironment.overrides parameter, or not include the overrides
parameter when submitting the UpdateComputeEnvironment
API operation.
The environment variables to pass to a container. This parameter maps to Env inthe Create a container section of the Docker Remote API and the --env
parameter to docker run.
We don't recommend using plaintext environment variables for sensitive information, such as credential data.
Environment variables cannot start with AWS_BATCH
. This naming convention is reserved for variables that Batch sets.
The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env
parameter to docker run.
We don't recommend using plaintext environment variables for sensitive information, such as credential data.
Environment variables cannot start with AWS_BATCH
. This naming convention is reserved for variables that Batch sets.
>Associates a set of proficiencies with a user.
" + "documentation":"Associates a set of proficiencies with a user.
" }, "BatchAssociateAnalyticsDataSet":{ "name":"BatchAssociateAnalyticsDataSet", @@ -499,7 +499,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If a version of the same flow content already exists, no new version is created and instead the existing version number is returned. If the FlowContentSha256
provided is different from the FlowContentSha256
of the $LATEST
published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign
.
Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If the FlowContentSha256
provided is different from the FlowContentSha256
of the $LATEST
published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign
.
Deletes the specified flow module.
" }, + "DeleteContactFlowVersion":{ + "name":"DeleteContactFlowVersion", + "http":{ + "method":"DELETE", + "requestUri":"/contact-flows/{InstanceId}/{ContactFlowId}/version/{ContactFlowVersion}" + }, + "input":{"shape":"DeleteContactFlowVersionRequest"}, + "output":{"shape":"DeleteContactFlowVersionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Deletes the particular version specified in flow version identifier.
" + }, "DeleteEmailAddress":{ "name":"DeleteEmailAddress", "http":{ @@ -1203,7 +1221,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website.
" + "documentation":"Deletes a queue.
" }, "DeleteQuickConnect":{ "name":"DeleteQuickConnect", @@ -1493,7 +1511,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"Describes the specified flow.
You can also create and update flows using the Amazon Connect Flow language.
Use the $SAVED
alias in the request to describe the SAVED
content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED
. After a flow is published, $SAVED
needs to be supplied to view saved content that has not been published.
In the response, Status indicates the flow status as either SAVED
or PUBLISHED
. The PUBLISHED
status will initiate validation on the content. SAVED
does not initiate validation of the content. SAVED
| PUBLISHED
Describes the specified flow.
You can also create and update flows using the Amazon Connect Flow language.
Use the $SAVED
alias in the request to describe the SAVED
content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED
. After a flow is published, $SAVED
needs to be supplied to view saved content that has not been published.
Use arn:aws:.../contact-flow/{id}:{version}
to retrieve the content of a specific flow version.
In the response, Status indicates the flow status as either SAVED
or PUBLISHED
. The PUBLISHED
status will initiate validation on the content. SAVED
does not initiate validation of the content. SAVED
| PUBLISHED
Indicates the checksum value of the flow content.
" + "documentation":"Indicates the checksum value of the latest published flow content.
" } } }, @@ -7863,6 +7882,10 @@ "shape":"FlowContentSha256", "documentation":"Indicates the checksum value of the flow content.
" }, + "ContactFlowVersion":{ + "shape":"ResourceVersion", + "documentation":"The identifier of the flow version.
" + }, "LastModifiedTime":{ "shape":"Timestamp", "documentation":"The Amazon Web Services Region where this resource was last modified.
" @@ -9617,6 +9640,39 @@ "members":{ } }, + "DeleteContactFlowVersionRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ContactFlowId", + "ContactFlowVersion" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
", + "location":"uri", + "locationName":"InstanceId" + }, + "ContactFlowId":{ + "shape":"ARN", + "documentation":"The identifier of the flow.
", + "location":"uri", + "locationName":"ContactFlowId" + }, + "ContactFlowVersion":{ + "shape":"ResourceVersion", + "documentation":"The identifier of the flow version.
", + "location":"uri", + "locationName":"ContactFlowVersion" + } + } + }, + "DeleteContactFlowVersionResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteEmailAddressRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/emr-serverless/2021-07-13/service-2.json b/botocore/data/emr-serverless/2021-07-13/service-2.json index 86100c0cb3..d32f0d655d 100644 --- a/botocore/data/emr-serverless/2021-07-13/service-2.json +++ b/botocore/data/emr-serverless/2021-07-13/service-2.json @@ -822,7 +822,7 @@ }, "EntryPointPath":{ "type":"string", - "max":256, + "max":4096, "min":1, "pattern":".*\\S.*", "sensitive":true diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 1bdb751b5b..bae1e8fe16 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -8757,6 +8757,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.me-south-1.amazonaws.com" }, + "fips-mx-central-1" : { + "credentialScope" : { + "region" : "mx-central-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.mx-central-1.amazonaws.com" + }, "fips-sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -8810,6 +8817,12 @@ "tags" : [ "fips" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.mx-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.sa-east-1.amazonaws.com", diff --git a/botocore/data/iotsitewise/2019-12-02/service-2.json b/botocore/data/iotsitewise/2019-12-02/service-2.json index 721158573e..2d97cd3c17 100644 --- a/botocore/data/iotsitewise/2019-12-02/service-2.json +++ b/botocore/data/iotsitewise/2019-12-02/service-2.json @@ -3583,6 +3583,10 @@ "type":"structure", "required":["entries"], "members":{ + "enablePartialEntryProcessing":{ + "shape":"BooleanValue", + "documentation":"This setting enables partial ingestion at entry-level. If set to true
, we ingest all TQVs not resulting in an error. If set to false
, an invalid TQV fails ingestion of the entire entry that contains it.
The list of asset property value entries for the batch put request. You can specify up to 10 entries per request.
" @@ -3599,6 +3603,7 @@ } } }, + "BooleanValue":{"type":"boolean"}, "Bucket":{ "type":"string", "max":63, @@ -6023,6 +6028,10 @@ "warmTierRetentionPeriod":{ "shape":"WarmTierRetentionPeriod", "documentation":"Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.
" + }, + "disallowIngestNullNaN":{ + "shape":"DisallowIngestNullNaN", + "documentation":"Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true
.
The type of null asset property data.
" + } + }, + "documentation":"The value type of null asset property data with BAD and UNCERTAIN qualities.
" + }, "PropertyValueStringValue":{"type":"string"}, "PutAssetPropertyValueEntries":{ "type":"list", @@ -8652,6 +8670,10 @@ "warmTierRetentionPeriod":{ "shape":"WarmTierRetentionPeriod", "documentation":"Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.
" + }, + "disallowIngestNullNaN":{ + "shape":"DisallowIngestNullNaN", + "documentation":"Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true
.
Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.
" + }, + "disallowIngestNullNaN":{ + "shape":"DisallowIngestNullNaN", + "documentation":"Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true
.
Asset property data of type string (sequence of characters).
" + "documentation":"Asset property data of type string (sequence of characters). The allowed pattern: \"^$|[^\\u0000-\\u001F\\u007F]+\". The max length is 1024.
" }, "integerValue":{ "shape":"PropertyValueIntegerValue", @@ -9781,11 +9817,15 @@ }, "doubleValue":{ "shape":"PropertyValueDoubleValue", - "documentation":"Asset property data of type double (floating point number).
" + "documentation":"Asset property data of type double (floating point number). The min value is -10^10. The max value is 10^10. Double.NaN is allowed.
" }, "booleanValue":{ "shape":"PropertyValueBooleanValue", "documentation":"Asset property data of type Boolean (true or false).
" + }, + "nullValue":{ + "shape":"PropertyValueNullValue", + "documentation":"The type of null asset property data with BAD and UNCERTAIN qualities.
" } }, "documentation":"Contains an asset property value (of a single type only).
" diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index b31f879823..0c2b284698 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -27,7 +27,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.
When you use AssociateKmsKey
, you specify either the logGroupName
parameter or the resourceIdentifier
parameter. You can't specify both of those parameters in the same operation.
Specify the logGroupName
parameter to cause all log events stored in the log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key.
Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey
operation with the resourceIdentifier
parameter that specifies a query-result
resource.
Specify the resourceIdentifier
parameter with a query-result
resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text.
Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method.
If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used.
If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.
CloudWatch Logs supports only symmetric KMS keys. Do not use an associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys.
It can take up to 5 minutes for this operation to take effect.
If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException
error.
Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.
When you use AssociateKmsKey
, you specify either the logGroupName
parameter or the resourceIdentifier
parameter. You can't specify both of those parameters in the same operation.
Specify the logGroupName
parameter to cause log events ingested into that log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key.
Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey
operation with the resourceIdentifier
parameter that specifies a query-result
resource.
Specify the resourceIdentifier
parameter with a query-result
resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text.
Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method.
If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used.
If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.
CloudWatch Logs supports only symmetric KMS keys. Do not use an associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys.
It can take up to 5 minutes for this operation to take effect.
If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException
error.
Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask
operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.
Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.
Exporting to S3 buckets that are encrypted with AES-256 is supported.
This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING
or PENDING
) export task at a time. To cancel an export task, use CancelExportTask.
You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask
operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.
Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.
Exporting to S3 buckets that are encrypted with AES-256 is supported.
This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING
or PENDING
) export task at a time. To cancel an export task, use CancelExportTask.
You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
We recommend that you don't regularly export to Amazon S3 as a way to continuously archive your logs. For that use case, we instaed recommend that you use subscriptions. For more information about subscriptions, see Real-time processing of log data with subscriptions.
Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
Deletes s delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source.
" + "documentation":"Deletes a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source.
" }, "DeleteDeliveryDestination":{ "name":"DeleteDeliveryDestination", @@ -422,7 +422,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"Returns a list of all CloudWatch Logs account policies in the account.
" + "documentation":"Returns a list of all CloudWatch Logs account policies in the account.
To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are retrieving information for.
To see data protection policies, you must have the logs:GetDataProtectionPolicy
and logs:DescribeAccountPolicies
permissions.
To see subscription filter policies, you must have the logs:DescrubeSubscriptionFilters
and logs:DescribeAccountPolicies
permissions.
To see transformer policies, you must have the logs:GetTransformer
and logs:DescribeAccountPolicies
permissions.
To see field index policies, you must have the logs:DescribeIndexPolicies
and logs:DescribeAccountPolicies
permissions.
Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.
You can specify the log group to search by using either logGroupIdentifier
or logGroupName
. You must include one of these two parameters, but you can't include both.
This operation has a limit of five transactions per second, after which transactions are throttled.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
" + "documentation":"Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.
You can specify the log group to search by using either logGroupIdentifier
or logGroupName
. You must include one of these two parameters, but you can't include both.
This operation has a limit of 25 transactions per second, after which transactions are throttled.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
" }, "DescribeMetricFilters":{ "name":"DescribeMetricFilters", @@ -980,7 +980,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"LimitExceededException"} ], - "documentation":"Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account.
Data protection policy
A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
If you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask
permission can use a GetLogEvents or FilterLogEvents operation with the unmask
parameter set to true
to view the unmasked log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
To use the PutAccountPolicy
operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy
and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
Subscription filter policy
A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
An Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName
. To perform a PutAccountPolicy
subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
Transformer policy
Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.
You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.
A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.
Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.
You can create transformers only for the log groups in the Standard log class.
You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria
parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log
, you can't have another field index policy filtered to my-logpprod
or my-logging
.
You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer
and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.
Field index policy
You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs
To find the fields that are in your log group events, use the GetLogGroupFields operation.
For example, suppose you have created a field index for requestId
. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value
or requestId in [value, value, ...]
will attempt to process only the log events where the indexed field matches the specified value.
Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId
won't match a log event containing requestId
.
You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria
parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log
, you can't have another field index policy filtered to my-logpprod
or my-logging
.
If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.
If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy
. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.
Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account.
To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are creating.
To create a data protection policy, you must have the logs:PutDataProtectionPolicy
and logs:PutAccountPolicy
permissions.
To create a subscription filter policy, you must have the logs:PutSubscriptionFilter
and logs:PutccountPolicy
permissions.
To create a transformer policy, you must have the logs:PutTransformer
and logs:PutAccountPolicy
permissions.
To create a field index policy, you must have the logs:PutIndexPolicy
and logs:PutAccountPolicy
permissions.
Data protection policy
A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
If you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask
permission can use a GetLogEvents or FilterLogEvents operation with the unmask
parameter set to true
to view the unmasked log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
To use the PutAccountPolicy
operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy
and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
Subscription filter policy
A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
A Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName
. To perform a PutAccountPolicy
subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
Transformer policy
Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.
You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.
A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.
Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.
You can create transformers only for the log groups in the Standard log class.
You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria
parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log
, you can't have another field index policy filtered to my-logpprod
or my-logging
.
You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer
and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.
Field index policy
You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs
To find the fields that are in your log group events, use the GetLogGroupFields operation.
For example, suppose you have created a field index for requestId
. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value
or requestId in [value, value, ...]
will attempt to process only the log events where the indexed field matches the specified value.
Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId
won't match a log event containing requestId
.
You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria
parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log
, you can't have another field index policy filtered to my-logpprod
or my-logging
.
If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.
If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy
. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.
Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Use PutDeliveryDestination
to create a delivery destination, which is a logical object that represents the actual delivery destination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery
to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
" + "documentation":"Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Use PutDeliveryDestination
to create a delivery destination in the same account of the actual delivery destination. The delivery destination that you create is a logical object that represents the actual delivery destination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery
to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
" }, "PutDeliveryDestinationPolicy":{ "name":"PutDeliveryDestinationPolicy", @@ -1146,7 +1146,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidOperationException"} ], - "documentation":"Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
Using regular expressions in filter patterns is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName
.
Using regular expressions to create subscription filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in subscription filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
To perform a PutSubscriptionFilter
operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName
.
Using regular expressions in filter patterns is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
To perform a PutSubscriptionFilter
operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
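For the same-account Firehose case above, a minimal boto3 sketch of PutSubscriptionFilter might look like this; the ARNs are placeholders, and roleArn is the role the iam:PassRole note refers to (it is needed for Kinesis Data Streams and Firehose destinations, not for Lambda).

import boto3

logs = boto3.client("logs")

# Placeholder ARNs for a same-account Firehose delivery stream.
logs.put_subscription_filter(
    logGroupName="/my-app/example",
    filterName="ship-errors",
    filterPattern='"ERROR"',
    destinationArn="arn:aws:firehose:us-east-1:123456789012:deliverystream/example-stream",
    roleArn="arn:aws:iam::123456789012:role/CWLtoFirehoseRole",
)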
Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. For more information about the permissions needed, see Create an IAM role to access the OpenSearch Service collection in the CloudWatch Logs User Guide.
" + "documentation":"Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. For more information about the permissions needed, see Permissions that the integration needs in the CloudWatch Logs User Guide.
" }, "dashboardViewerPrincipals":{ "shape":"DashboardViewerPrincipals", - "documentation":"Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.
In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardsAccess IAM policy. For more information, see
Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.
In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardAccess IAM policy. For more information, see IAM policies for users.
Defines the type of log that the source is sending.
For Amazon Bedrock, the valid value is APPLICATION_LOGS
.
For Amazon CodeWhisperer, the valid value is EVENT_LOGS
.
For IAM Identity Center, the valid value is ERROR_LOGS
.
For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS
, AUTHENTICATION_LOGS
, WORKMAIL_AVAILABILITY_PROVIDER_LOGS
, and WORKMAIL_MAILBOX_ACCESS_LOGS
.
Defines the type of log that the source is sending.
For Amazon Bedrock, the valid value is APPLICATION_LOGS
.
For CloudFront, the valid value is ACCESS_LOGS
.
For Amazon CodeWhisperer, the valid value is EVENT_LOGS
.
For Elemental MediaPackage, the valid values are EGRESS_ACCESS_LOGS
and INGRESS_ACCESS_LOGS
.
For Elemental MediaTailor, the valid values are AD_DECISION_SERVER_LOGS
, MANIFEST_SERVICE_LOGS
, and TRANSCODE_LOGS
.
For IAM Identity Center, the valid value is ERROR_LOGS
.
For Amazon Q, the valid value is EVENT_LOGS
.
For Amazon SES mail manager, the valid value is APPLICATION_LOG
.
For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS
, AUTHENTICATION_LOGS
, WORKMAIL_AVAILABILITY_PROVIDER_LOGS
, WORKMAIL_MAILBOX_ACCESS_LOGS
, and WORKMAIL_PERSONAL_ACCESS_TOKEN_LOGS
.
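To show where these logType values are used, here is a hedged boto3 sketch of registering a delivery source; the source name and resource ARN are placeholder assumptions, and the chosen logType must be one of the values listed above for the owning service.

import boto3

logs = boto3.client("logs")

# Placeholder name and ARN; ACCESS_CONTROL_LOGS is one of the WorkMail values listed above.
logs.put_delivery_source(
    name="workmail-access-logs",
    resourceArn="arn:aws:workmail:us-east-1:123456789012:organization/m-example",
    logType="ACCESS_CONTROL_LOGS",
)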
This string allows re-configuring the S3 object prefix to contain either static or variable sections. The valid variables to use in the suffix path will vary by each log source. See ConfigurationTemplate$allowedSuffixPathFields for more info on what values are supported in the suffix path for each log source.
" + "documentation":"This string allows re-configuring the S3 object prefix to contain either static or variable sections. The valid variables to use in the suffix path will vary by each log source. To find the values supported for the suffix path for each log source, use the DescribeConfigurationTemplates operation and check the allowedSuffixPathFields
field in the response.
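To discover which suffix-path variables a given log source supports, the DescribeConfigurationTemplates operation referenced above can be queried; the snake_case method and response member names below follow the usual botocore mapping and should be treated as a sketch.

import boto3

logs = boto3.client("logs")

# First page only for brevity; pass nextToken to page through the rest.
resp = logs.describe_configuration_templates()
for template in resp.get("configurationTemplates", []):
    # allowedSuffixPathFields is the member named in the documentation above.
    print(template.get("service"), template.get("logType"), template.get("allowedSuffixPathFields"))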
his exception is returned if an unknown error occurs during a Live Tail session.
", + "documentation":"This exception is returned if an unknown error occurs during a Live Tail session.
", "exception":true }, "SessionTimeoutException":{ diff --git a/botocore/data/quicksight/2018-04-01/service-2.json b/botocore/data/quicksight/2018-04-01/service-2.json index 2668fd01ea..fe9f13996b 100644 --- a/botocore/data/quicksight/2018-04-01/service-2.json +++ b/botocore/data/quicksight/2018-04-01/service-2.json @@ -16920,6 +16920,13 @@ }, "documentation":"The configuration of destination parameter values.
This is a union type structure. For this structure to be valid, only one of the attributes can be defined.
" }, + "DigitGroupingStyle":{ + "type":"string", + "enum":[ + "DEFAULT", + "LAKHS" + ] + }, "DimensionField":{ "type":"structure", "members":{ @@ -25217,7 +25224,9 @@ "THOUSANDS", "MILLIONS", "BILLIONS", - "TRILLIONS" + "TRILLIONS", + "LAKHS", + "CRORES" ] }, "NumericAxisOptions":{ @@ -31878,7 +31887,7 @@ "TableFieldOptionList":{ "type":"list", "member":{"shape":"TableFieldOption"}, - "max":100 + "max":201 }, "TableFieldOptions":{ "type":"structure", @@ -32089,11 +32098,16 @@ "SCROLLED" ] }, + "TableUnaggregatedFieldList":{ + "type":"list", + "member":{"shape":"UnaggregatedField"}, + "max":201 + }, "TableUnaggregatedFieldWells":{ "type":"structure", "members":{ "Values":{ - "shape":"UnaggregatedFieldList", + "shape":"TableUnaggregatedFieldList", "documentation":"The values field well for a pivot table. Values are unaggregated for an unaggregated table.
" } }, @@ -32846,6 +32860,10 @@ "Visibility":{ "shape":"Visibility", "documentation":"Determines the visibility of the thousands separator.
" + }, + "GroupingStyle":{ + "shape":"DigitGroupingStyle", + "documentation":"Determines the way numbers are styled to accommodate different readability standards. The DEFAULT
value uses the standard international grouping system and groups numbers by the thousands. The LAKHS
value uses the Indian numbering system and groups numbers by lakhs and crores.
The options that determine the thousands separator configuration.
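As a rough illustration of the new enum, a ThousandsSeparatorOptions value using the LAKHS grouping style might be expressed as the following dict when building an analysis or template definition with boto3; the surrounding number-format nesting is an assumption and is not shown here.

# Illustrative only: ThousandsSeparatorOptions with the new GroupingStyle member.
thousands_separator_options = {
    "Symbol": "COMMA",          # existing NumericSeparatorSymbol value
    "Visibility": "VISIBLE",
    "GroupingStyle": "LAKHS",   # DEFAULT keeps the international thousands grouping
}
# The NumberScale enum in this same release also gains "LAKHS" and "CRORES".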
" diff --git a/botocore/data/sns/2010-03-31/service-2.json b/botocore/data/sns/2010-03-31/service-2.json index 8ff9f4cd24..70dd2549b9 100644 --- a/botocore/data/sns/2010-03-31/service-2.json +++ b/botocore/data/sns/2010-03-31/service-2.json @@ -1099,7 +1099,7 @@ }, "Attributes":{ "shape":"TopicAttributesMap", - "documentation":"A map of attributes with their corresponding values.
The following lists names, descriptions, and values of the special request parameters that the CreateTopic
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
FifoTopic
– Set to true to create a FIFO topic.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
The following attributes apply only to FIFO topics:
ArchivePolicy
– The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
A map of attributes with their corresponding values.
The following lists names, descriptions, and values of the special request parameters that the CreateTopic
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
FifoTopic
– Set to true to create a FIFO topic.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
The following attributes apply only to FIFO topics:
ArchivePolicy
– The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
FifoThroughputScope
– Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. This attribute has two possible values:
Topic
– The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first.
MessageGroup
– The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference.
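A minimal boto3 sketch of creating a FIFO topic with the new FifoThroughputScope attribute described above; the topic name is a placeholder, and all attribute values are passed as strings.

import boto3

sns = boto3.client("sns")

# FIFO topic names must end in ".fifo".
sns.create_topic(
    Name="orders.fifo",
    Attributes={
        "FifoTopic": "true",
        "ContentBasedDeduplication": "false",
        # New in this release: "Topic" (default) or "MessageGroup".
        "FifoThroughputScope": "MessageGroup",
    },
)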
This parameter applies only to FIFO (first-in-first-out) topics.
The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId
is sent successfully, subsequent messages with the same MessageDeduplicationId
are accepted successfully but aren't delivered.
Every message must have a unique MessageDeduplicationId
.
You may provide a MessageDeduplicationId
explicitly.
If you aren't able to provide a MessageDeduplicationId
and you enable ContentBasedDeduplication
for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
If you don't provide a MessageDeduplicationId
and the topic doesn't have ContentBasedDeduplication
set, the action fails with an error.
If the topic has a ContentBasedDeduplication
set, your MessageDeduplicationId
overrides the generated one.
When ContentBasedDeduplication
is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
If you send one message with ContentBasedDeduplication
enabled, and then another message with a MessageDeduplicationId
that is the same as the one generated for the first MessageDeduplicationId
, the two messages are treated as duplicates and only one copy of the message is delivered.
The MessageDeduplicationId
is available to the consumer of the message (this can be useful for troubleshooting delivery issues).
If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId
after the deduplication interval, Amazon SNS can't detect duplicate messages.
Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted.
The length of MessageDeduplicationId
is 128 characters.
MessageDeduplicationId
can contain alphanumeric characters (a-z, A-Z, 0-9)
and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)
.
This parameter applies only to FIFO (first-in-first-out) topics.
This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId
can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9)
and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)
.
Every message must have a unique MessageDeduplicationId
, which is a token used for deduplication of sent messages within the 5 minute minimum deduplication interval.
The scope of deduplication depends on the FifoThroughputScope
attribute: when set to Topic
, the message deduplication scope is across the entire topic; when set to MessageGroup
, the message deduplication scope is within each individual message group.
If a message with a particular MessageDeduplicationId
is sent successfully, subsequent messages within the deduplication scope and interval, with the same MessageDeduplicationId
, are accepted successfully but aren't delivered.
Every message must have a unique MessageDeduplicationId
.
You may provide a MessageDeduplicationId
explicitly.
If you aren't able to provide a MessageDeduplicationId
and you enable ContentBasedDeduplication
for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
If you don't provide a MessageDeduplicationId
and the topic doesn't have ContentBasedDeduplication
set, the action fails with an error.
If the topic has a ContentBasedDeduplication
set, your MessageDeduplicationId
overrides the generated one.
When ContentBasedDeduplication
is in effect, messages with identical content sent within the deduplication scope and interval are treated as duplicates and only one copy of the message is delivered.
If you send one message with ContentBasedDeduplication
enabled, and then another message with a MessageDeduplicationId
that is the same as the one generated for the first MessageDeduplicationId
, the two messages are treated as duplicates, within the deduplication scope and interval, and only one copy of the message is delivered.
The MessageDeduplicationId
is available to the consumer of the message (this can be useful for troubleshooting delivery issues).
If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId
after the deduplication interval, Amazon SNS can't detect duplicate messages.
Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted.
This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId
can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9)
and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)
.
Every message must have a unique MessageDeduplicationId
, which is a token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId
is sent successfully, any message sent with the same MessageDeduplicationId
during the 5-minute deduplication interval is treated as a duplicate.
If the topic has ContentBasedDeduplication
set, the system generates a MessageDeduplicationId
based on the contents of the message. Your MessageDeduplicationId
overrides the generated one.
This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId
can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9)
and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)
.
Every message must have a unique MessageDeduplicationId
, which is a token used for deduplication of sent messages within the 5 minute minimum deduplication interval.
The scope of deduplication depends on the FifoThroughputScope
attribute: when set to Topic
, the message deduplication scope is across the entire topic; when set to MessageGroup
, the message deduplication scope is within each individual message group.
If a message with a particular MessageDeduplicationId
is sent successfully, subsequent messages within the deduplication scope and interval, with the same MessageDeduplicationId
, are accepted successfully but aren't delivered.
Every message must have a unique MessageDeduplicationId
:
You may provide a MessageDeduplicationId
explicitly.
If you aren't able to provide a MessageDeduplicationId
and you enable ContentBasedDeduplication
for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
If you don't provide a MessageDeduplicationId
and the topic doesn't have ContentBasedDeduplication
set, the action fails with an error.
If the topic has a ContentBasedDeduplication
set, your MessageDeduplicationId
overrides the generated one.
When ContentBasedDeduplication
is in effect, messages with identical content sent within the deduplication scope and interval are treated as duplicates and only one copy of the message is delivered.
If you send one message with ContentBasedDeduplication
enabled, and then another message with a MessageDeduplicationId
that is the same as the one generated for the first MessageDeduplicationId
, the two messages are treated as duplicates, within the deduplication scope and interval, and only one copy of the message is delivered.
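A minimal boto3 sketch of publishing to a FIFO topic with an explicit MessageDeduplicationId, as described above; the ARN, group ID, and deduplication ID are placeholders, and MessageGroupId is required for FIFO topics.

import boto3

sns = boto3.client("sns")

# With FifoThroughputScope set to MessageGroup, deduplication applies within
# the "order-123" group rather than across the entire topic.
sns.publish(
    TopicArn="arn:aws:sns:us-east-1:123456789012:orders.fifo",
    Message='{"orderId": "123", "status": "CREATED"}',
    MessageGroupId="order-123",
    MessageDeduplicationId="order-123-created",
)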
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes
action uses:
ApplicationSuccessFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint.
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
HTTP
HTTPSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
HTTPSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint.
HTTPFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
Amazon Kinesis Data Firehose
FirehoseSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
FirehoseSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
FirehoseFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
Lambda
LambdaSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint.
LambdaSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to a Lambda endpoint.
LambdaFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint.
Platform application endpoint
ApplicationSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
ApplicationSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
ApplicationFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services.
For more information, see Using Amazon SNS Application Attributes for Message Delivery Status.
Amazon SQS
SQSSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
SQSSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
SQSFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
The <ENDPOINT>SuccessFeedbackRoleArn and <ENDPOINT>FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The <ENDPOINT>SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the <ENDPOINT>FailureFeedbackRoleArn attribute, then all failed message deliveries generate CloudWatch Logs.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
The following attribute applies only to FIFO topics:
ArchivePolicy
– The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes
action uses:
ApplicationSuccessFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint.
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
HTTP
HTTPSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
HTTPSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint.
HTTPFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
Amazon Kinesis Data Firehose
FirehoseSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
FirehoseSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
FirehoseFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
Lambda
LambdaSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint.
LambdaSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to a Lambda endpoint.
LambdaFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint.
Platform application endpoint
ApplicationSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
ApplicationSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
ApplicationFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services.
For more information, see Using Amazon SNS Application Attributes for Message Delivery Status.
Amazon SQS
SQSSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
SQSSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
SQSFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
The <ENDPOINT>SuccessFeedbackRoleArn and <ENDPOINT>FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The <ENDPOINT>SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the <ENDPOINT>FailureFeedbackRoleArn attribute, then all failed message deliveries generate CloudWatch Logs.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
The following attribute applies only to FIFO topics:
ArchivePolicy
– The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
FifoThroughputScope
– Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. This attribute has two possible values:
Topic
– The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first.
MessageGroup
– The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference.