From dc24ad8ce3ac9565d37460dfc8611faeb853681b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 01:04:34 +0000 Subject: [PATCH] chore(schema): update (#3692) Co-authored-by: github-actions --- samtranslator/schema/schema.json | 176 ++---- schema_source/cloudformation-docs.json | 768 +++++++++++++++++++---- schema_source/cloudformation.schema.json | 176 ++---- 3 files changed, 761 insertions(+), 359 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 8be72d06a..5fbdfbb4d 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -1718,7 +1718,7 @@ "properties": { "AnalyzerConfiguration": { "$ref": "#/definitions/AWS::AccessAnalyzer::Analyzer.AnalyzerConfiguration", - "markdownDescription": "Contains information about the configuration of an unused access analyzer for an AWS organization or account.", + "markdownDescription": "Contains information about the configuration of an analyzer for an AWS organization or account.", "title": "AnalyzerConfiguration" }, "AnalyzerName": { @@ -1738,7 +1738,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to the analyzer.", + "markdownDescription": "An array of key-value pairs to apply to the analyzer. You can use the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .\n\nFor the tag key, you can specify a value that is 1 to 128 characters in length and cannot be prefixed with `aws:` .\n\nFor the tag value, you can specify a value that is 0 to 256 characters in length.", "title": "Tags", "type": "array" }, @@ -1779,7 +1779,7 @@ "properties": { "UnusedAccessConfiguration": { "$ref": "#/definitions/AWS::AccessAnalyzer::Analyzer.UnusedAccessConfiguration", - "markdownDescription": "Specifies the configuration of an unused access analyzer for an AWS organization or account. External access analyzers do not support any configuration.", + "markdownDescription": "Specifies the configuration of an unused access analyzer for an AWS organization or account.", "title": "UnusedAccessConfiguration" } }, @@ -1855,7 +1855,7 @@ "additionalProperties": false, "properties": { "UnusedAccessAge": { - "markdownDescription": "The specified access age in days for which to generate findings for unused access. For example, if you specify 90 days, the analyzer will generate findings for IAM entities within the accounts of the selected organization for any access that hasn't been used in 90 or more days since the analyzer's last scan. You can choose a value between 1 and 180 days.", + "markdownDescription": "The specified access age in days for which to generate findings for unused access. For example, if you specify 90 days, the analyzer will generate findings for IAM entities within the accounts of the selected organization for any access that hasn't been used in 90 or more days since the analyzer's last scan. You can choose a value between 1 and 365 days.", "title": "UnusedAccessAge", "type": "number" } @@ -27290,7 +27290,7 @@ "type": "string" }, "Version": { - "markdownDescription": "The version number of the launch template, `$Latest` , or `$Default` .\n\nIf the value is `$Latest` , the latest version of the launch template is used. 
If the value is `$Default` , the default version of the launch template is used.\n\n> If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. It's only changed if the `updateToLatestImageVersion` parameter for the compute environment is set to `true` . During an infrastructure update, if either `$Latest` or `$Default` is specified, AWS Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . \n\nDefault: `$Default` .", + "markdownDescription": "The version number of the launch template, `$Default` , or `$Latest` .\n\nIf the value is `$Default` , the default version of the launch template is used. If the value is `$Latest` , the latest version of the launch template is used.\n\n> If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. It's only changed if the `updateToLatestImageVersion` parameter for the compute environment is set to `true` . During an infrastructure update, if either `$Default` or `$Latest` is specified, AWS Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . \n\nDefault: `$Default`\n\nLatest: `$Latest`", "title": "Version", "type": "string" } @@ -34424,7 +34424,7 @@ "additionalProperties": false, "properties": { "TypeName": { - "markdownDescription": "The name of the hook.\n\nYou must specify either `TypeVersionArn` , or `TypeName` and `VersionId` .", + "markdownDescription": "The name of the Hook.\n\nYou must specify either `TypeVersionArn` , or `TypeName` and `VersionId` .", "title": "TypeName", "type": "string" }, @@ -34497,22 +34497,22 @@ "additionalProperties": false, "properties": { "Configuration": { - "markdownDescription": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", + "markdownDescription": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "Configuration", "type": "string" }, "ConfigurationAlias": { - "markdownDescription": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. Hook types currently support default configuration alias.", + "markdownDescription": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. 
Hook types currently support only the `default` configuration alias.", "title": "ConfigurationAlias", "type": "string" }, "TypeArn": { - "markdownDescription": "The Amazon Resource Number (ARN) for the hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", + "markdownDescription": "The Amazon Resource Name (ARN) for the Hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "TypeArn", "type": "string" }, "TypeName": { - "markdownDescription": "The unique name for your hook. Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", + "markdownDescription": "The unique name for your Hook. Specifies a three-part namespace for your Hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "TypeName", "type": "string" } }, @@ -34579,7 +34579,7 @@ "additionalProperties": false, "properties": { "ExecutionRoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the task execution role that grants the hook permission.", + "markdownDescription": "The Amazon Resource Name (ARN) of the task execution role that grants the Hook permission.", "title": "ExecutionRoleArn", "type": "string" }, @@ -34589,7 +34589,7 @@ "title": "LoggingConfig" }, "SchemaHandlerPackage": { - "markdownDescription": "A URL to the Amazon S3 bucket containing the hook project package that contains the necessary files for the hook you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide for Extension Development* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That's, the user must have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "A URL to the Amazon S3 bucket containing the Hook project package that contains the necessary files for the Hook you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide for Extension Development* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That is, the user must have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. 
For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", "title": "SchemaHandlerPackage", "type": "string" }, @@ -37090,7 +37090,7 @@ "additionalProperties": false, "properties": { "Bucket": { - "markdownDescription": "The Amazon S3 bucket to store the access logs in, for example, `myawslogbucket.s3.amazonaws.com` .", + "markdownDescription": "The Amazon S3 bucket to store the access logs in, for example, `amzn-s3-demo-bucket.s3.amazonaws.com` .", "title": "Bucket", "type": "string" }, @@ -38892,7 +38892,7 @@ "additionalProperties": false, "properties": { "Bucket": { - "markdownDescription": "The Amazon S3 bucket to store the access logs in, for example, `myawslogbucket.s3.amazonaws.com` .", + "markdownDescription": "The Amazon S3 bucket to store the access logs in, for example, `amzn-s3-demo-bucket.s3.amazonaws.com` .", "title": "Bucket", "type": "string" }, @@ -39289,7 +39289,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . 
The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . 
`errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -39612,7 +39612,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. 
This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. 
To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. 
You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . 
For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -41236,7 +41236,7 @@ "type": "string" }, "EnvironmentType": { - "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `MAC_ARM` is available only in regions US East (Ohio), US East (N. Virginia), US West (Oregon), Europe (Frankfurt), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `ARM_EC2` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_EC2` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `MAC_ARM` is available only in regions US East (Ohio), US East (N. Virginia), US West (Oregon), Europe (Frankfurt), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_EC2` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "title": "EnvironmentType", "type": "string" }, @@ -41938,7 +41938,7 @@ "type": "string" }, "ReportBuildStatus": { - "markdownDescription": "Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an `invalidInputException` is thrown.", + "markdownDescription": "Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket. 
If this is set and you use a different source provider, an `invalidInputException` is thrown.", "title": "ReportBuildStatus", "type": "boolean" }, @@ -63684,22 +63684,22 @@ "additionalProperties": false, "properties": { "Content": { - "markdownDescription": "The content of the metadata form.", + "markdownDescription": "", "title": "Content", "type": "string" }, "FormName": { - "markdownDescription": "The name of the metadata form.", + "markdownDescription": "", "title": "FormName", "type": "string" }, "TypeIdentifier": { - "markdownDescription": "The ID of the metadata form type.", + "markdownDescription": "", "title": "TypeIdentifier", "type": "string" }, "TypeRevision": { - "markdownDescription": "The revision of the metadata form type.", + "markdownDescription": "", "title": "TypeRevision", "type": "string" } }, @@ -64774,12 +64774,12 @@ "additionalProperties": false, "properties": { "Max": { - "markdownDescription": "The maximum GPU for the accelerator.", + "markdownDescription": "The maximum number of GPU accelerators in the worker host.", "title": "Max", "type": "number" }, "Min": { - "markdownDescription": "The minimum GPU for the accelerator.", + "markdownDescription": "The minimum number of GPU accelerators in the worker host.", "title": "Min", "type": "number" } }, @@ -67941,7 +67941,7 @@ }, "ImportSourceSpecification": { "$ref": "#/definitions/AWS::DynamoDB::Table.ImportSourceSpecification", - "markdownDescription": "Specifies the properties of data being imported from the S3 bucket source to the table.\n\n> If you specify the `ImportSourceSpecification` property, and also specify either the `StreamSpecification` , the `TableClass` property, or the `DeletionProtectionEnabled` property, the IAM entity creating/updating stack must have `UpdateTable` permission.", + "markdownDescription": "Specifies the properties of data being imported from the S3 bucket source to the table.\n\n> If you specify the `ImportSourceSpecification` property, and also specify either the `StreamSpecification` , the `TableClass` property, the `DeletionProtectionEnabled` property, or the `WarmThroughput` property, the IAM entity creating/updating the stack must have `UpdateTable` permission.", "title": "ImportSourceSpecification" }, "KeySchema": { @@ -76339,7 +76339,7 @@ "type": "array" }, "MaxEntries": { - "markdownDescription": "The maximum number of entries for the prefix list.", + "markdownDescription": "The maximum number of entries for the prefix list. You can't modify the entries and the size of a prefix list at the same time.\n\nThis property is required when you create a prefix list.", "title": "MaxEntries", "type": "number" }, @@ -83583,7 +83583,7 @@ "type": "boolean" }, "HealthCheckGracePeriodSeconds": { - "markdownDescription": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of `0` is used.\n\nIf you do not use an Elastic Load Balancing, we recommend that you use the `startPeriod` in the task definition health check parameters. 
For more information, see [Health check](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HealthCheck.html) .\n\nIf your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.", + "markdownDescription": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container health checks after a task has first started. If you don't specify a health check grace period value, the default value of `0` is used. If you don't use any of the health checks, then `healthCheckGracePeriodSeconds` is unused.\n\nIf your service's tasks take a while to start and respond to health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.", "title": "HealthCheckGracePeriodSeconds", "type": "number" }, @@ -85138,7 +85138,7 @@ "type": "number" }, "Name": { - "markdownDescription": "The name that's used for the port mapping. This parameter only applies to Service Connect. This parameter is the name that you use in the `serviceConnectConfiguration` of a service. The name can include up to 64 characters. The characters can include lowercase letters, numbers, underscores (_), and hyphens (-). The name can't start with a hyphen.\n\nFor more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The name that's used for the port mapping. This parameter is the name that you use in the `serviceConnectConfiguration` and the `vpcLatticeConfigurations` of a service. The name can include up to 64 characters. The characters can include lowercase letters, numbers, underscores (_), and hyphens (-). The name can't start with a hyphen.", "title": "Name", "type": "string" }, @@ -102965,8 +102965,6 @@ "items": { "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerDefinition" }, - "markdownDescription": "The set of container definitions that are included in the container group.", - "title": "ContainerDefinitions", "type": "array" }, "Name": { @@ -102975,13 +102973,11 @@ "type": "string" }, "OperatingSystem": { - "markdownDescription": "The platform required for all containers in the container group definition.\n\n> Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the [Amazon Linux 2 FAQs](https://docs.aws.amazon.com/https://aws.amazon.com/amazon-linux-2/faqs/) . For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See [Migrate to Amazon GameLift server SDK version 5.](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-serversdk5-migration.html)", + "markdownDescription": "The platform that all containers in the container group definition run on.\n\n> Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. 
See more details in the [Amazon Linux 2 FAQs](https://docs.aws.amazon.com/https://aws.amazon.com/amazon-linux-2/faqs/) . For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See [Migrate to Amazon GameLift server SDK version 5.](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-serversdk5-migration.html)", "title": "OperatingSystem", "type": "string" }, "SchedulingStrategy": { - "markdownDescription": "The method for deploying the container group across fleet instances. A replica container group might have multiple copies on each fleet instance. A daemon container group maintains only one copy per fleet instance.", - "title": "SchedulingStrategy", "type": "string" }, "Tags": { @@ -102993,13 +102989,9 @@ "type": "array" }, "TotalCpuLimit": { - "markdownDescription": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.", - "title": "TotalCpuLimit", "type": "number" }, "TotalMemoryLimit": { - "markdownDescription": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group.", - "title": "TotalMemoryLimit", "type": "number" } }, @@ -103040,77 +103032,51 @@ "items": { "type": "string" }, - "markdownDescription": "A command that's passed to the container on startup. Each argument for the command is an additional string in the array. See the [ContainerDefinition::command](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-command) parameter in the *Amazon Elastic Container Service API reference.*", - "title": "Command", "type": "array" }, "ContainerName": { - "markdownDescription": "The container definition identifier. Container names are unique within a container group definition.", - "title": "ContainerName", "type": "string" }, "Cpu": { - "markdownDescription": "The number of CPU units that are reserved for the container. Note: 1 vCPU unit equals 1024 CPU units. If no resources are reserved, the container shares the total CPU limit for the container group.\n\n*Related data type:* `ContainerGroupDefinition$TotalCpuLimit`", - "title": "Cpu", "type": "number" }, "DependsOn": { "items": { "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerDependency" }, - "markdownDescription": "Indicates that the container relies on the status of other containers in the same container group during its startup and shutdown sequences. A container might have dependencies on multiple containers.", - "title": "DependsOn", "type": "array" }, "EntryPoint": { "items": { "type": "string" }, - "markdownDescription": "The entry point that's passed to the container on startup. 
If there are multiple arguments, each argument is an additional string in the array. See the [ContainerDefinition::entryPoint](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-entryPoint) parameter in the *Amazon Elastic Container Service API Reference* .", - "title": "EntryPoint", "type": "array" }, "Environment": { "items": { "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerEnvironment" }, - "markdownDescription": "A set of environment variables that's passed to the container on startup. See the [ContainerDefinition::environment](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-environment) parameter in the *Amazon Elastic Container Service API Reference* .", - "title": "Environment", "type": "array" }, "Essential": { - "markdownDescription": "Indicates whether the container is vital to the container group. If an essential container fails, the entire container group is restarted.", - "title": "Essential", "type": "boolean" }, "HealthCheck": { - "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerHealthCheck", - "markdownDescription": "A configuration for a non-terminal health check. A container, which automatically restarts if it stops functioning, also restarts if it fails this health check. If an essential container in the daemon group fails a health check, the entire container group is restarted. The essential container in the replica group doesn't use this health check mechanism, because the Amazon GameLift Agent automatically handles the task.", - "title": "HealthCheck" + "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerHealthCheck" }, "ImageUri": { - "markdownDescription": "The URI to the image that $short; copied and deployed to a container fleet. For a more specific identifier, see `ResolvedImageDigest` .", - "title": "ImageUri", "type": "string" }, "MemoryLimits": { - "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.MemoryLimits", - "markdownDescription": "The amount of memory that Amazon GameLift makes available to the container. If memory limits aren't set for an individual container, the container shares the container group's total memory allocation.\n\n*Related data type:* `ContainerGroupDefinition$TotalMemoryLimit`", - "title": "MemoryLimits" + "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.MemoryLimits" }, "PortConfiguration": { - "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.PortConfiguration", - "markdownDescription": "Defines the ports that are available to assign to processes in the container. For example, a game server process requires a container port to allow game clients to connect to it. Container ports aren't directly accessed by inbound traffic. Amazon GameLift maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's `ConnectionPortRange` .", - "title": "PortConfiguration" + "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.PortConfiguration" }, "ResolvedImageDigest": { - "markdownDescription": "A unique and immutable identifier for the container image that is deployed to a container fleet. The digest is a SHA 256 hash of the container image manifest.", - "title": "ResolvedImageDigest", "type": "string" }, "WorkingDirectory": { - "markdownDescription": "The directory in the container where commands are run. 
See the [ContainerDefinition::workingDirectory](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-workingDirectory) parameter in the *Amazon Elastic Container Service API Reference* .", - "title": "WorkingDirectory", "type": "string" } }, @@ -103177,7 +103143,7 @@ "type": "number" }, "Retries": { - "markdownDescription": "The number of times to retry a failed health check before the container is considered unhealthy. The first run of the command does not count as a retry.", + "markdownDescription": "The number of times to retry a failed health check before flagging the container unhealthy. The first run of the command does not count as a retry.", "title": "Retries", "type": "number" }, @@ -103187,7 +103153,7 @@ "type": "number" }, "Timeout": { - "markdownDescription": "The time period (in seconds) to wait for a health check to succeed before a failed health check is counted.", + "markdownDescription": "The time period (in seconds) to wait for a health check to succeed before counting a failed health check.", "title": "Timeout", "type": "number" } @@ -103227,13 +103193,9 @@ "additionalProperties": false, "properties": { "HardLimit": { - "markdownDescription": "", - "title": "HardLimit", "type": "number" }, "SoftLimit": { - "markdownDescription": "", - "title": "SoftLimit", "type": "number" } }, @@ -103297,7 +103259,7 @@ "title": "AnywhereConfiguration" }, "ApplyCapacity": { - "markdownDescription": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "markdownDescription": "Current resource capacity settings for managed EC2 fleets and managed container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "title": "ApplyCapacity", "type": "string" }, @@ -103312,14 +103274,12 @@ "title": "CertificateConfiguration" }, "ComputeType": { - "markdownDescription": "The type of compute resource used to host your game servers.\n\n- `EC2` \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.\n- `CONTAINER` \u2013 Container images with your game server build and supporting software are deployed to Amazon EC2 instances for cloud hosting. With this compute type, you must specify the `ContainerGroupsConfiguration` parameter.\n- `ANYWHERE` \u2013 Game servers or container images with your game server and supporting software are deployed to compute resources that are provided and managed by you. 
With this compute type, you can also set the `AnywhereConfiguration` parameter.", + "markdownDescription": "The type of compute resource used to host your game servers.\n\n- `EC2` \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.\n- `ANYWHERE` \u2013 Game servers and supporting software are deployed to compute resources that you provide and manage. With this compute type, you can also set the `AnywhereConfiguration` parameter.", "title": "ComputeType", "type": "string" }, "ContainerGroupsConfiguration": { - "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsConfiguration", - "markdownDescription": "*This data type is currently not available. It is under improvement as we respond to customer feedback from the Containers public preview.*\n\nConfiguration details for a set of container groups, for use when creating a fleet with compute type `CONTAINER` .\n\n*Used with:* `CreateFleet`", - "title": "ContainerGroupsConfiguration" + "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsConfiguration" }, "Description": { "markdownDescription": "A description for the fleet.", "title": "Description", "type": "string" }, "EC2InboundPermissions": { "items": { "$ref": "#/definitions/AWS::GameLift::Fleet.IpPermission" }, - "markdownDescription": "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for EC2 and container fleets. You can leave this parameter empty when creating the fleet, but you must call `UpdateFleetPortSettings` to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.\n\nTo manage inbound access for a container fleet, set this parameter to the same port numbers that you set for the fleet's connection port range. During the life of the fleet, update this parameter to control which connection ports are open to inbound traffic.", + "markdownDescription": "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call [UpdateFleetPortSettings](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings) to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.", "title": "EC2InboundPermissions", "type": "array" }, "EC2InstanceType": { - "markdownDescription": "The Amazon GameLift-supported Amazon EC2 instance type to use with EC2 and container fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See [Amazon Elastic Compute Cloud Instance Types](https://docs.aws.amazon.com/ec2/instance-types/) for detailed descriptions of Amazon EC2 instance types.", + "markdownDescription": "The Amazon GameLift-supported Amazon EC2 instance type to use with managed EC2 fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. 
See [Amazon Elastic Compute Cloud Instance Types](https://docs.aws.amazon.com/ec2/instance-types/) for detailed descriptions of Amazon EC2 instance types.", "title": "EC2InstanceType", "type": "string" }, @@ -103350,12 +103310,12 @@ "type": "string" }, "InstanceRoleARN": { - "markdownDescription": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", + "markdownDescription": "A unique identifier for an IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the [IAM dashboard](https://docs.aws.amazon.com/iam/) in the AWS Management Console . Learn more about using on-box credentials for your game servers at [Access external resources from a game server](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is `EC2` .", "title": "InstanceRoleARN", "type": "string" }, "InstanceRoleCredentialsProvider": { - "markdownDescription": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", + "markdownDescription": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is `EC2` .", "title": "InstanceRoleCredentialsProvider", "type": "string" }, @@ -103487,13 +103447,9 @@ "additionalProperties": false, "properties": { "FromPort": { - "markdownDescription": "Starting value for the port range.", - "title": "FromPort", "type": "number" }, "ToPort": { - "markdownDescription": "Ending value for the port. Port numbers are end-inclusive. 
This value must be equal to or greater than `FromPort` .", - "title": "ToPort", "type": "number" } }, @@ -103507,22 +103463,16 @@ "additionalProperties": false, "properties": { "ConnectionPortRange": { - "$ref": "#/definitions/AWS::GameLift::Fleet.ConnectionPortRange", - "markdownDescription": "A set of ports to allow inbound traffic, including game clients, to connect to processes running in the container fleet.\n\nConnection ports are dynamically mapped to container ports, which are assigned to individual processes running in a container. The connection port range must have enough ports to map to all container ports across a fleet instance. To calculate the minimum connection ports needed, use the following formula:\n\n*[Total number of container ports as defined for containers in the replica container group] * [Desired or calculated number of replica container groups per instance] + [Total number of container ports as defined for containers in the daemon container group]*\n\nAs a best practice, double the minimum number of connection ports.\n\n> Use the fleet's `EC2InboundPermissions` property to control external access to connection ports. Set this property to the connection port numbers that you want to open access to. See `IpPermission` for more details.", - "title": "ConnectionPortRange" + "$ref": "#/definitions/AWS::GameLift::Fleet.ConnectionPortRange" }, "ContainerGroupDefinitionNames": { "items": { "type": "string" }, - "markdownDescription": "The list of container group definition names to deploy to a new container fleet.", - "title": "ContainerGroupDefinitionNames", "type": "array" }, "ContainerGroupsPerInstance": { - "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsPerInstance", - "markdownDescription": "", - "title": "ContainerGroupsPerInstance" + "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsPerInstance" } }, "required": [ @@ -103535,13 +103485,9 @@ "additionalProperties": false, "properties": { "DesiredReplicaContainerGroupsPerInstance": { - "markdownDescription": "The desired number of replica container groups to place on each fleet instance.", - "title": "DesiredReplicaContainerGroupsPerInstance", "type": "number" }, "MaxReplicaContainerGroupsPerInstance": { - "markdownDescription": "The maximum possible number of replica container groups that each fleet instance can have.", - "title": "MaxReplicaContainerGroupsPerInstance", "type": "number" } }, @@ -103615,7 +103561,7 @@ }, "LocationCapacity": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationCapacity", - "markdownDescription": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "markdownDescription": "Current resource capacity settings for managed EC2 fleets and managed container fleets. 
For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "title": "LocationCapacity" } }, @@ -103649,7 +103595,7 @@ "type": "number" }, "MaxConcurrentGameSessionActivations": { - "markdownDescription": "The number of game sessions in status `ACTIVATING` to allow on an instance. This setting limits the instance resources that can be used for new game activations at any one time.", + "markdownDescription": "The number of game sessions in status `ACTIVATING` to allow on an instance or compute. This setting limits the instance resources that can be used for new game activations at any one time.", "title": "MaxConcurrentGameSessionActivations", "type": "number" }, @@ -103738,7 +103684,7 @@ "additionalProperties": false, "properties": { "ConcurrentExecutions": { - "markdownDescription": "The number of server processes using this configuration that run concurrently on each instance.", + "markdownDescription": "The number of server processes using this configuration that run concurrently on each instance or compute.", "title": "ConcurrentExecutions", "type": "number" }, @@ -104046,7 +103992,7 @@ "items": { "$ref": "#/definitions/AWS::GameLift::GameSessionQueue.PlayerLatencyPolicy" }, - "markdownDescription": "A set of policies that act as a sliding cap on player latency. FleetIQ works to deliver low latency for most players in a game session. These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value.", + "markdownDescription": "A set of policies that enforce a sliding cap on player latency when processing game sessions placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value.", "title": "PlayerLatencyPolicies", "type": "array" }, @@ -104064,7 +104010,7 @@ "type": "array" }, "TimeoutInSeconds": { - "markdownDescription": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a `TIMED_OUT` status. By default, this property is set to `600` .", + "markdownDescription": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a `TIMED_OUT` status.", "title": "TimeoutInSeconds", "type": "number" } @@ -104279,7 +104225,7 @@ "type": "number" }, "AdditionalPlayerCount": { - "markdownDescription": "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 10-person team, and the additional player count is set to 2, 10 players will be selected for the match and 2 more player slots will be open for future players. 
This parameter is not used if `FlexMatchMode` is set to `STANDALONE` .", + "markdownDescription": "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used if `FlexMatchMode` is set to `STANDALONE` .", "title": "AdditionalPlayerCount", "type": "number" }, @@ -130125,7 +130071,7 @@ "additionalProperties": false, "properties": { "DashboardDefinition": { - "markdownDescription": "The dashboard definition specified in a JSON literal. For detailed information, see [Creating dashboards (CLI)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-using-aws-cli.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The dashboard definition specified in a JSON literal.\n\n- AWS IoT SiteWise Monitor (Classic), see [Create dashboards ( AWS CLI )](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-using-aws-cli.html)\n- AWS IoT SiteWise Monitor (AI-aware), see [Create dashboards ( AWS CLI )](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-ai-dashboard-cli.html)\n\nin the *AWS IoT SiteWise User Guide*", "title": "DashboardDefinition", "type": "string" }, @@ -139249,7 +139195,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a Firehose stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose Firehose streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", + "markdownDescription": "A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. 
For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a Firehose stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose streams with IAM resource tags will fail with an `AccessDeniedException` such as the following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", "title": "Tags", "type": "array" } @@ -140437,7 +140383,7 @@ }, "ParquetSerDe": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ParquetSerDe", - "markdownDescription": "A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see [Apache Parquet](https://docs.aws.amazon.com/https://parquet.apache.org/documentation/latest/) .", + "markdownDescription": "A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see [Apache Parquet](https://parquet.apache.org/docs/contribution-guidelines/) .", "title": "ParquetSerDe" } }, @@ -140457,7 +140403,7 @@ "title": "CloudWatchLoggingOptions" }, "ContentColumnName": { - "markdownDescription": "The name of the record content column", + "markdownDescription": "The name of the record content column.", "title": "ContentColumnName", "type": "string" }, @@ -140477,7 +140423,7 @@ "type": "string" }, "MetaDataColumnName": { - "markdownDescription": "The name of the record metadata column", + "markdownDescription": "Specify a column name in the table where the metadata information is to be loaded. 
When you enable this field, you will see the following column in the Snowflake table, which differs based on the source type.\n\nFor Direct PUT as source\n\n`{ \"firehoseDeliveryStreamName\" : \"streamname\", \"IngestionTime\" : \"timestamp\" }`\n\nFor Kinesis Data Stream as source\n\n`{ \"kinesisStreamName\" : \"streamname\", \"kinesisShardId\" : \"Id\", \"kinesisPartitionKey\" : \"key\", \"kinesisSequenceNumber\" : \"1234\", \"subsequenceNumber\" : \"2334\", \"IngestionTime\" : \"timestamp\" }`", "title": "MetaDataColumnName", "type": "string" }, @@ -142484,7 +142430,7 @@ "properties": { "DestinationConfig": { "$ref": "#/definitions/AWS::Lambda::EventInvokeConfig.DestinationConfig", - "markdownDescription": "A destination for events after they have been sent to a function for processing.\n\n**Destinations** - *Function* - The Amazon Resource Name (ARN) of a Lambda function.\n- *Queue* - The ARN of a standard SQS queue.\n- *Topic* - The ARN of a standard SNS topic.\n- *Event Bus* - The ARN of an Amazon EventBridge event bus.", + "markdownDescription": "A destination for events after they have been sent to a function for processing.\n\n**Destinations** - *Function* - The Amazon Resource Name (ARN) of a Lambda function.\n- *Queue* - The ARN of a standard SQS queue.\n- *Bucket* - The ARN of an Amazon S3 bucket.\n- *Topic* - The ARN of a standard SNS topic.\n- *Event Bus* - The ARN of an Amazon EventBridge event bus.\n\n> S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.", "title": "DestinationConfig" }, "FunctionName": { @@ -142555,7 +142501,7 @@ "additionalProperties": false, "properties": { "Destination": { - "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", + "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of unsuccessful [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) , [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) , [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon 
MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", "title": "Destination", "type": "string" } @@ -142854,7 +142800,7 @@ "additionalProperties": false, "properties": { "Destination": { - "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", + "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of unsuccessful [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) , [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) , [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", "title": "Destination", "type": "string" } @@ -143014,7 +142960,7 @@ "title": "ImageConfig" }, "KmsKeyArn": { - "markdownDescription": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) . When [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry ( Amazon ECR ). 
If you don't provide a customer managed key, Lambda uses a default service key.", + "markdownDescription": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt the following resources:\n\n- The function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) .\n- The function's [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) snapshots.\n- When used with `SourceKMSKeyArn` , the unzipped version of the .zip deployment package that's used for function invocations. For more information, see [Specifying a customer managed key for Lambda](https://docs.aws.amazon.com/lambda/latest/dg/encrypt-zip-package.html#enable-zip-custom-encryption) .\n- The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see [Function lifecycle](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html#images-lifecycle) .\n\nIf you don't provide a customer managed key, Lambda uses an [AWS owned key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) or an [AWS managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) .", "title": "KmsKeyArn", "type": "string" }, @@ -164278,12 +164224,12 @@ "items": { "$ref": "#/definitions/AWS::MediaStore::Container.CorsRule" }, - "markdownDescription": "Sets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.\n\nTo enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.\n\nTo learn more about CORS, see [Cross-Origin Resource Sharing (CORS) in AWS Elemental MediaStore](https://docs.aws.amazon.com/mediastore/latest/ug/cors-policy.html) .", + "markdownDescription": "> End of support notice: On November 13, 2025, AWS will discontinue support for AWS Elemental MediaStore. After November 13, 2025, you will no longer be able to access the AWS Elemental MediaStore console or AWS Elemental MediaStore resources. For more information, visit this [blog post](https://docs.aws.amazon.com/media/support-for-aws-elemental-mediastore-ending-soon/) . \n\nSets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.\n\nTo enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy. 
If more than one rule applies, the service uses the first applicable rule listed.\n\nTo learn more about CORS, see [Cross-Origin Resource Sharing (CORS) in AWS Elemental MediaStore](https://docs.aws.amazon.com/mediastore/latest/ug/cors-policy.html) .", "title": "CorsPolicy", "type": "array" }, "LifecyclePolicy": { - "markdownDescription": "Writes an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy. It takes up to 20 minutes for the change to take effect.\n\nFor information about how to construct an object lifecycle policy, see [Components of an Object Lifecycle Policy](https://docs.aws.amazon.com/mediastore/latest/ug/policies-object-lifecycle-components.html) .", + "markdownDescription": "> End of support notice: On November 13, 2025, AWS will discontinue support for AWS Elemental MediaStore. After November 13, 2025, you will no longer be able to access the AWS Elemental MediaStore console or AWS Elemental MediaStore resources. For more information, visit this [blog post](https://docs.aws.amazon.com/media/support-for-aws-elemental-mediastore-ending-soon/) . \n\nWrites an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy. It takes up to 20 minutes for the change to take effect.\n\nFor information about how to construct an object lifecycle policy, see [Components of an Object Lifecycle Policy](https://docs.aws.amazon.com/mediastore/latest/ug/policies-object-lifecycle-components.html) .", "title": "LifecyclePolicy", "type": "string" }, @@ -224995,7 +224941,7 @@ "type": "number" }, "MinCapacity": { - "markdownDescription": "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. The smallest value that you can use is 0.5.", + "markdownDescription": "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. For Aurora versions that support the Aurora Serverless v2 auto-pause feature, the smallest value that you can use is 0. For versions that don't support Aurora Serverless v2 auto-pause, the smallest value that you can use is 0.5.", "title": "MinCapacity", "type": "number" } diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 990999b34..38de789b2 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -273,14 +273,21 @@ "OutcomeAlarms": "The alarm that you specify to monitor the health of your application during practice runs. When the outcome alarm goes into an `ALARM` state, the practice run is ended and the outcome is set to `FAILED` ." }, "AWS::AccessAnalyzer::Analyzer": { - "AnalyzerConfiguration": "Contains information about the configuration of an unused access analyzer for an AWS organization or account.", + "AnalyzerConfiguration": "Contains information about the configuration of an analyzer for an AWS organization or account.", "AnalyzerName": "The name of the analyzer.", "ArchiveRules": "Specifies the archive rules to add for the analyzer. 
Archive rules automatically archive findings that meet the criteria you define for the rule.", - "Tags": "An array of key-value pairs to apply to the analyzer.", + "Tags": "An array of key-value pairs to apply to the analyzer. You can use the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .\n\nFor the tag key, you can specify a value that is 1 to 128 characters in length and cannot be prefixed with `aws:` .\n\nFor the tag value, you can specify a value that is 0 to 256 characters in length.", "Type": "The type represents the zone of trust for the analyzer.\n\n*Allowed Values* : ACCOUNT | ORGANIZATION | ACCOUNT_UNUSED_ACCESS | ORGANIZATION_UNUSED_ACCESS" }, + "AWS::AccessAnalyzer::Analyzer AnalysisRule": { + "Exclusions": "A list of rules for the analyzer containing criteria to exclude from analysis. Entities that meet the rule criteria will not generate findings." + }, + "AWS::AccessAnalyzer::Analyzer AnalysisRuleCriteria": { + "AccountIds": "A list of AWS account IDs to apply to the analysis rule criteria. The accounts cannot include the organization analyzer owner account. Account IDs can only be applied to the analysis rule criteria for organization-level analyzers. The list cannot include more than 2,000 account IDs.", + "ResourceTags": "An array of key-value pairs to match for your resources. You can use the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .\n\nFor the tag key, you can specify a value that is 1 to 128 characters in length and cannot be prefixed with `aws:` .\n\nFor the tag value, you can specify a value that is 0 to 256 characters in length. If the specified tag value is 0 characters, the rule is applied to all principals with the specified tag key." + }, "AWS::AccessAnalyzer::Analyzer AnalyzerConfiguration": { - "UnusedAccessConfiguration": "Specifies the configuration of an unused access analyzer for an AWS organization or account. External access analyzers do not support any configuration." + "UnusedAccessConfiguration": "Specifies the configuration of an unused access analyzer for an AWS organization or account." }, "AWS::AccessAnalyzer::Analyzer ArchiveRule": { "Filter": "The criteria for the rule.", @@ -298,7 +305,8 @@ "Value": "The value for the tag. You can specify a value that's 1 to 256 characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, "AWS::AccessAnalyzer::Analyzer UnusedAccessConfiguration": { - "UnusedAccessAge": "The specified access age in days for which to generate findings for unused access. For example, if you specify 90 days, the analyzer will generate findings for IAM entities within the accounts of the selected organization for any access that hasn't been used in 90 or more days since the analyzer's last scan. You can choose a value between 1 and 180 days." + "AnalysisRule": "Contains information about analysis rules for the analyzer. Analysis rules determine which entities will generate findings based on the criteria you define when you create the rule.", + "UnusedAccessAge": "The specified access age in days for which to generate findings for unused access. 
For example, if you specify 90 days, the analyzer will generate findings for IAM entities within the accounts of the selected organization for any access that hasn't been used in 90 or more days since the analyzer's last scan. You can choose a value between 1 and 365 days." }, "AWS::AmazonMQ::Broker": { "AuthenticationStrategy": "Optional. The authentication strategy used to secure the broker. The default is `SIMPLE` .", @@ -3533,6 +3541,7 @@ "OpsCenterEnabled": "Indicates whether Application Insights will create OpsItems for any problem that is detected by Application Insights for an application.", "OpsItemSNSTopicArn": "The SNS topic provided to Application Insights that is associated with the created OpsItems to receive SNS notifications for opsItem updates.", "ResourceGroupName": "The name of the resource group used for the application.", + "SNSNotificationArn": "The SNS topic ARN that is associated with SNS notifications for updates or issues.", "Tags": "An array of `Tags` ." }, "AWS::ApplicationInsights::Application Alarm": { @@ -3635,6 +3644,7 @@ "PatternSet": "The log pattern set." }, "AWS::ApplicationSignals::ServiceLevelObjective": { + "BurnRateConfigurations": "Each object in this array defines the length of the look-back window used to calculate one burn rate metric for this SLO. The burn rate measures how fast the service is consuming the error budget, relative to the attainment goal of the SLO.", "Description": "An optional description for this SLO.", "Goal": "This structure contains the attributes that determine the goal of an SLO. This includes the time period for evaluation and the attainment threshold.", "Name": "A name for this SLO.", @@ -3642,6 +3652,9 @@ "Sli": "A structure containing information about the performance metric that this SLO monitors, if this is a period-based SLO.", "Tags": "A list of key-value pairs to associate with the SLO. You can associate as many as 50 tags with an SLO. To be able to associate tags with the SLO when you create the SLO, you must have the cloudwatch:TagResource permission.\n\nTags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values." }, + "AWS::ApplicationSignals::ServiceLevelObjective BurnRateConfiguration": { + "LookBackWindowMinutes": "The number of minutes to use as the look-back window." + }, "AWS::ApplicationSignals::ServiceLevelObjective CalendarInterval": { "Duration": "Specifies the duration of each calendar interval. For example, if `Duration` is `1` and `DurationUnit` is `MONTH` , each interval is one month, aligned with the calendar.", "DurationUnit": "Specifies the calendar interval unit.", @@ -3860,8 +3873,10 @@ "AWS::AutoScaling::AutoScalingGroup": { "AutoScalingGroupName": "The name of the Auto Scaling group. This name must be unique per Region per account.\n\nThe name can contain any ASCII character 33 to 126 including most punctuation characters, digits, and upper and lowercased letters.\n\n> You cannot use a colon (:) in the name.", "AvailabilityZoneDistribution": "The instance capacity distribution across Availability Zones.", + "AvailabilityZoneImpairmentPolicy": "The Availability Zone impairment policy.", "AvailabilityZones": "A list of Availability Zones where instances in the Auto Scaling group can be created. 
Used for launching into the default VPC subnet in each Availability Zone when not using the `VPCZoneIdentifier` property, or for attaching a network interface when an existing network interface ID is specified in a launch template.", "CapacityRebalance": "Indicates whether Capacity Rebalancing is enabled. Otherwise, Capacity Rebalancing is disabled. When you turn on Capacity Rebalancing, Amazon EC2 Auto Scaling attempts to launch a Spot Instance whenever Amazon EC2 notifies that a Spot Instance is at an elevated risk of interruption. After launching a new instance, it then terminates an old instance. For more information, see [Use Capacity Rebalancing to handle Amazon EC2 Spot Interruptions](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-capacity-rebalancing.html) in the in the *Amazon EC2 Auto Scaling User Guide* .", + "CapacityReservationSpecification": "The capacity reservation specification.", "Context": "Reserved.", "Cooldown": "*Only needed if you use simple scaling policies.*\n\nThe amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: `300` seconds", "DefaultInstanceWarmup": "The amount of time, in seconds, until a new instance is considered to have finished initializing and resource consumption to become stable after it enters the `InService` state.\n\nDuring an instance refresh, Amazon EC2 Auto Scaling waits for the warm-up period after it replaces an instance before it moves on to replacing the next instance. Amazon EC2 Auto Scaling also waits for the warm-up period before aggregating the metrics for new instances with existing instances in the Amazon CloudWatch metrics that are used for scaling, resulting in more reliable usage data. For more information, see [Set the default instance warmup for an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\n> To manage various warm-up settings at the group level, we recommend that you set the default instance warmup, *even if it is set to 0 seconds* . To remove a value that you previously set, include the property but specify `-1` for the value. However, we strongly recommend keeping the default instance warmup enabled by specifying a value of `0` or other nominal value. \n\nDefault: None", @@ -3884,6 +3899,7 @@ "NotificationConfigurations": "Configures an Auto Scaling group to send notifications when specified events take place.", "PlacementGroup": "The name of the placement group into which to launch your instances. For more information, see [Placement groups](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\n> A *cluster* placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.", "ServiceLinkedRoleARN": "The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS service on your behalf. By default, Amazon EC2 Auto Scaling uses a service-linked role named `AWSServiceRoleForAutoScaling` , which it creates if it does not exist. 
For more information, see [Service-linked roles](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-service-linked-role.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "SkipZonalShiftValidation": "", "Tags": "One or more tags. You can tag your Auto Scaling group and propagate the tags to the Amazon EC2 instances it launches. Tags are not propagated to Amazon EBS volumes. To add tags to Amazon EBS volumes, specify the tags in a launch template but use caution. If the launch template specifies an instance tag with a key that is also specified for the Auto Scaling group, Amazon EC2 Auto Scaling overrides the value of that instance tag with the value specified by the Auto Scaling group. For more information, see [Tag Auto Scaling groups and instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-tagging.html) in the *Amazon EC2 Auto Scaling User Guide* .", "TargetGroupARNs": "The Amazon Resource Names (ARN) of the Elastic Load Balancing target groups to associate with the Auto Scaling group. Instances are registered as targets with the target groups. The target groups receive incoming traffic and route requests to one or more registered targets. For more information, see [Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html) in the *Amazon EC2 Auto Scaling User Guide* .", "TerminationPolicies": "A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see [Configure termination policies for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid values: `Default` | `AllocationStrategy` | `ClosestToNextInstanceHour` | `NewestInstance` | `OldestInstance` | `OldestLaunchConfiguration` | `OldestLaunchTemplate` | `arn:aws:lambda:region:account-id:function:my-function:my-alias`", @@ -3901,10 +3917,28 @@ "AWS::AutoScaling::AutoScalingGroup AvailabilityZoneDistribution": { "CapacityDistributionStrategy": "If launches fail in an Availability Zone, the following strategies are available. The default is `balanced-best-effort` .\n\n- `balanced-only` - If launches fail in an Availability Zone, Auto Scaling will continue to attempt to launch in the unhealthy zone to preserve a balanced distribution.\n- `balanced-best-effort` - If launches fail in an Availability Zone, Auto Scaling will attempt to launch in another healthy Availability Zone instead." }, + "AWS::AutoScaling::AutoScalingGroup AvailabilityZoneImpairmentPolicy": { + "ImpairedZoneHealthCheckBehavior": "Specifies the health check behavior for the impaired Availability Zone in an active zonal shift. If you select `Replace unhealthy` , instances that appear unhealthy will be replaced in all Availability Zones. If you select `Ignore unhealthy` , instances will not be replaced in the Availability Zone with the active zonal shift. For more information, see [Auto Scaling group zonal shift](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-zonal-shift.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "ZonalShiftEnabled": "If `true` , enable zonal shift for your Auto Scaling group." 
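For illustration, a minimal template sketch using the new zonal shift settings described above might look like the following; the subnet and launch template IDs are placeholders, and the `ImpairedZoneHealthCheckBehavior` enum spelling is assumed from the labels quoted in the description rather than taken from this schema update:

```yaml
# Hypothetical sketch only: IDs, sizes, and the enum spelling for
# ImpairedZoneHealthCheckBehavior ("Replace unhealthy" / "Ignore unhealthy"
# in the prose) are assumptions.
Resources:
  ExampleAutoScalingGroup:
    Type: AWS::AutoScaling::AutoScalingGroup
    Properties:
      MinSize: "1"
      MaxSize: "4"
      VPCZoneIdentifier:
        - subnet-0123456789abcdef0   # placeholder subnet in one AZ
        - subnet-0fedcba9876543210   # placeholder subnet in another AZ
      LaunchTemplate:
        LaunchTemplateId: lt-0123456789abcdef0   # placeholder
        Version: "1"
      AvailabilityZoneImpairmentPolicy:
        ZonalShiftEnabled: true
        # Keep replacing instances everywhere except the shifted-away zone.
        ImpairedZoneHealthCheckBehavior: IgnoreUnhealthy
```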
+ }, "AWS::AutoScaling::AutoScalingGroup BaselineEbsBandwidthMbpsRequest": { "Max": "The maximum value in Mbps.", "Min": "The minimum value in Mbps." }, + "AWS::AutoScaling::AutoScalingGroup BaselinePerformanceFactorsRequest": { + "Cpu": "The CPU performance to consider, using an instance family as the baseline reference." + }, + "AWS::AutoScaling::AutoScalingGroup CapacityReservationSpecification": { + "CapacityReservationPreference": "The capacity reservation preference. The following options are available:\n\n- `capacity-reservations-only` - Auto Scaling will only launch instances into a Capacity Reservation or Capacity Reservation resource group. If capacity isn't available, instances will fail to launch.\n- `capacity-reservations-first` - Auto Scaling will try to launch instances into a Capacity Reservation or Capacity Reservation resource group first. If capacity isn't available, instances will run in On-Demand capacity.\n- `none` - Auto Scaling will not launch instances into a Capacity Reservation. Instances will run in On-Demand capacity.\n- `default` - Auto Scaling uses the Capacity Reservation preference from your launch template or an open Capacity Reservation.", + "CapacityReservationTarget": "Describes a target Capacity Reservation or Capacity Reservation resource group." + }, + "AWS::AutoScaling::AutoScalingGroup CapacityReservationTarget": { + "CapacityReservationIds": "The Capacity Reservation IDs to launch instances into.", + "CapacityReservationResourceGroupArns": "The resource group ARNs of the Capacity Reservation to launch instances into." + }, + "AWS::AutoScaling::AutoScalingGroup CpuPerformanceFactorRequest": { + "References": "Specify an instance family to use as the baseline reference for CPU performance. All instance types that match your specified attributes will be compared against the CPU performance of the referenced instance family, regardless of CPU manufacturer or architecture differences.\n\n> Currently only one instance family can be specified in the list." + }, "AWS::AutoScaling::AutoScalingGroup InstanceMaintenancePolicy": { "MaxHealthyPercentage": "Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. To clear a previously set value, specify a value of `-1` .\n\nBoth `MinHealthyPercentage` and `MaxHealthyPercentage` must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.", "MinHealthyPercentage": "Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. To clear a previously set value, specify a value of `-1` ." @@ -3918,6 +3952,7 @@ "AllowedInstanceTypes": "The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.\n\nYou can use strings with one or more wild cards, represented by an asterisk ( `*` ), to allow an instance type, size, or generation. 
The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` , Amazon EC2 Auto Scaling will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 Auto Scaling will allow all the M5a instance types, but not the M5n instance types.\n\n> If you specify `AllowedInstanceTypes` , you can't specify `ExcludedInstanceTypes` . \n\nDefault: All instance types", "BareMetal": "Indicates whether bare metal instance types are included, excluded, or required.\n\nDefault: `excluded`", "BaselineEbsBandwidthMbps": "The minimum and maximum baseline bandwidth performance for an instance type, in Mbps. For more information, see [Amazon EBS\u2013optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nDefault: No minimum or maximum limits", + "BaselinePerformanceFactors": "The baseline performance factors for the instance requirements.", "BurstablePerformance": "Indicates whether burstable performance instance types are included, excluded, or required. For more information, see [Burstable performance instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nDefault: `excluded`", "CpuManufacturers": "Lists which specific CPU manufacturers to include.\n\n- For instance types with Intel CPUs, specify `intel` .\n- For instance types with AMD CPUs, specify `amd` .\n- For instance types with AWS CPUs, specify `amazon-web-services` .\n\n> Don't confuse the CPU hardware manufacturer with the CPU hardware architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template. \n\nDefault: Any manufacturer", "ExcludedInstanceTypes": "The instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk ( `*` ), to exclude an instance family, type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` , you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 Auto Scaling will exclude all the M5a instance types, but not the M5n instance types.\n\n> If you specify `ExcludedInstanceTypes` , you can't specify `AllowedInstanceTypes` . \n\nDefault: No excluded instance types", @@ -3995,6 +4030,9 @@ "NotificationTypes": "A list of event types that send a notification. Event types can include any of the following types.\n\n*Allowed values* :\n\n- `autoscaling:EC2_INSTANCE_LAUNCH`\n- `autoscaling:EC2_INSTANCE_LAUNCH_ERROR`\n- `autoscaling:EC2_INSTANCE_TERMINATE`\n- `autoscaling:EC2_INSTANCE_TERMINATE_ERROR`\n- `autoscaling:TEST_NOTIFICATION`", "TopicARN": "The Amazon Resource Name (ARN) of the Amazon SNS topic." }, + "AWS::AutoScaling::AutoScalingGroup PerformanceFactorReferenceRequest": { + "InstanceFamily": "The instance family to use as a baseline reference.\n\n> Make sure that you specify the correct value for the instance family. The instance family is everything before the period (.) in the instance type name. For example, in the instance `c6i.large` , the instance family is `c6i` , not `c6` . 
For more information, see [Amazon EC2 instance type naming conventions](https://docs.aws.amazon.com/ec2/latest/instancetypes/instance-type-names.html) in *Amazon EC2 Instance Types* . \n\nThe following instance types are *not supported* for performance protection.\n\n- `c1`\n- `g3 | g3s`\n- `hpc7g`\n- `m1 | m2`\n- `mac1 | mac2 | mac2-m1ultra | mac2-m2 | mac2-m2pro`\n- `p3dn | p4d | p5`\n- `t1`\n- `u-12tb1 | u-18tb1 | u-24tb1 | u-3tb1 | u-6tb1 | u-9tb1 | u7i-12tb | u7in-16tb | u7in-24tb | u7in-32tb`\n\nIf you enable performance protection by specifying a supported instance family, the returned instance types will exclude the preceding unsupported instance families.\n\nIf you specify an unsupported instance family as a value for baseline performance, the API returns an empty response." }, "AWS::AutoScaling::AutoScalingGroup TagProperty": { "Key": "The tag key.", "PropagateAtLaunch": "Set to `true` if you want CloudFormation to copy the tag to EC2 instances that are launched as part of the Auto Scaling group. Set to `false` if you want the tag attached only to the Auto Scaling group and not copied to any instances launched as part of the Auto Scaling group.", "Value": "The tag value." }, @@ -4656,7 +4694,14 @@ "AWS::Batch::ComputeEnvironment LaunchTemplateSpecification": { "LaunchTemplateId": "The ID of the launch template.", "LaunchTemplateName": "The name of the launch template.", - "Version": "The version number of the launch template, `$Latest` , or `$Default` .\n\nIf the value is `$Latest` , the latest version of the launch template is used. If the value is `$Default` , the default version of the launch template is used.\n\n> If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. It's only changed if the `updateToLatestImageVersion` parameter for the compute environment is set to `true` . During an infrastructure update, if either `$Latest` or `$Default` is specified, AWS Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . \n\nDefault: `$Default` ." + "Overrides": "A launch template to use in place of the default launch template. You must specify either the launch template ID or launch template name in the request, but not both.\n\nYou can specify up to ten (10) launch template overrides that are associated to unique instance types or families for each compute environment.\n\n> To unset all override templates for a compute environment, you can pass an empty array to the [UpdateComputeEnvironment.overrides](https://docs.aws.amazon.com/batch/latest/APIReference/API_UpdateComputeEnvironment.html) parameter, or not include the `overrides` parameter when submitting the `UpdateComputeEnvironment` API operation.", + "Version": "The version number of the launch template, `$Default` , or `$Latest` .\n\nIf the value is `$Default` , the default version of the launch template is used. If the value is `$Latest` , the latest version of the launch template is used.\n\n> If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. 
It's only changed if the `updateToLatestImageVersion` parameter for the compute environment is set to `true` . During an infrastructure update, if either `$Default` or `$Latest` is specified, AWS Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . \n\nDefault: `$Default`\n\nLatest: `$Latest`" }, "AWS::Batch::ComputeEnvironment LaunchTemplateSpecificationOverride": { "LaunchTemplateId": "The ID of the launch template.\n\n*Note:* If you specify the `launchTemplateId` you can't specify the `launchTemplateName` as well.", "LaunchTemplateName": "The name of the launch template.\n\n*Note:* If you specify the `launchTemplateName` you can't specify the `launchTemplateId` as well.", "TargetInstanceTypes": "The instance type or family that this override launch template should be applied to.\n\nThis parameter is required when defining a launch template override.\n\nInformation included in this parameter must meet the following requirements:\n\n- Must be a valid Amazon EC2 instance type or family.\n- `optimal` isn't allowed.\n- `targetInstanceTypes` can target only instance types and families that are included within the [`ComputeResource.instanceTypes`](https://docs.aws.amazon.com/batch/latest/APIReference/API_ComputeResource.html#Batch-Type-ComputeResource-instanceTypes) set. `targetInstanceTypes` doesn't need to include all of the instances from the `instanceType` set, but at least a subset. For example, if `ComputeResource.instanceTypes` includes `[m5, g5]` , `targetInstanceTypes` can include `[m5.2xlarge]` and `[m5.large]` but not `[c5.large]` .\n- `targetInstanceTypes` included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another to target an instance type within this same family.", "Version": "The version number of the launch template, `$Default` , or `$Latest` .\n\nIf the value is `$Default` , the default version of the launch template is used. If the value is `$Latest` , the latest version of the launch template is used.\n\n> If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. It's only changed if the `updateToLatestImageVersion` parameter for the compute environment is set to `true` . During an infrastructure update, if either `$Default` or `$Latest` is specified, AWS Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . 
\n\nDefault: `$Default`\n\nLatest: `$Latest`" }, "AWS::Batch::ComputeEnvironment UpdatePolicy": { "JobExecutionTimeoutMinutes": "Specifies the job timeout (in minutes) when the compute environment infrastructure is updated. The default value is 30.", @@ -6413,27 +6458,98 @@ "ServiceTimeout": "The maximum time, in seconds, that can elapse before a custom resource operation times out.\n\nThe value must be an integer from 1 to 3600. The default value is 3600 seconds (1 hour).", "ServiceToken": "The service token, such as an Amazon SNS topic ARN or Lambda function ARN. The service token must be from the same Region as the stack.\n\nUpdates aren't supported." }, + "AWS::CloudFormation::GuardHook": { + "Alias": "The type name alias for the Hook. This alias must be unique per account and Region.\n\nThe alias must be in the form `Name1::Name2::Name3` and must not begin with `AWS` . For example, `Private::Guard::MyTestHook` .", + "ExecutionRole": "The IAM role that the Hook assumes to retrieve your Guard rules from S3 and optionally write a detailed Guard output report back.", + "FailureMode": "Specifies how the Hook responds when rules fail their evaluation.\n\n- `FAIL` : Prevents the action from proceeding. This is helpful for enforcing strict compliance or security policies.\n- `WARN` : Issues warnings to users but allows actions to continue. This is useful for non-critical validations or informational checks.", + "HookStatus": "Specifies if the Hook is `ENABLED` or `DISABLED` .", + "LogBucket": "Specifies the name of an S3 bucket to store the Guard output report. This report contains the results of your Guard rule validations.", + "Options": "Specifies the S3 location of your input parameters.", + "RuleLocation": "Specifies the S3 location of your Guard rules.", + "StackFilters": "Specifies the stack level filters for the Hook.", + "TargetFilters": "Specifies the target filters for the Hook.", + "TargetOperations": "Specifies which type of operation the Hook is run against.\n\nValid values: `STACK` | `RESOURCE` | `CHANGE_SET` | `CLOUD_CONTROL`" + }, + "AWS::CloudFormation::GuardHook Options": { + "InputParams": "Specifies the S3 location where your input parameters are located." + }, + "AWS::CloudFormation::GuardHook S3Location": { + "Uri": "Specifies the S3 path to the file containing your Guard rules or input parameters (in the form `s3:///` ).\n\nFor Guard rules, the object stored in S3 must have one of the following file extensions: `.guard` , `.zip` , or `.tar.gz` .\n\nFor input parameters, the object stored in S3 must have one of the following file extensions: `.yaml` , `.json` , `.zip` , or `.tar.gz` .", + "VersionId": "For S3 buckets with versioning enabled, specifies the unique ID of the S3 object version to download your Guard rules or input parameters from.\n\nThe Guard Hook downloads files from S3 every time the Hook is invoked. To prevent accidental changes or deletions, we recommend using a version when configuring your Guard Hook." + }, + "AWS::CloudFormation::GuardHook StackFilters": { + "FilteringCriteria": "The filtering criteria.\n\n- All stack names and stack roles ( `All` ): The Hook will only be invoked when all specified filters match.\n- Any stack names and stack roles ( `Any` ): The Hook will be invoked if at least one of the specified filters match.", + "StackNames": "Includes or excludes specific stacks from Hook invocations.", + "StackRoles": "Includes or excludes specific stacks from Hook invocations based on their associated IAM roles." 
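Taken together, the GuardHook properties described above might be declared as in this hedged sketch; the role ARN, bucket, and stack name are placeholders, and the exact `FilteringCriteria` casing is an assumption (the prose quotes `All`/`Any`):

```yaml
# Illustrative sketch only; ARN, bucket, stack name, and FilteringCriteria
# casing are assumptions, not taken from this schema update.
Resources:
  ExampleGuardHook:
    Type: AWS::CloudFormation::GuardHook
    Properties:
      Alias: Private::Guard::MyTestHook
      HookStatus: ENABLED
      FailureMode: WARN              # warn users but let the operation continue
      ExecutionRole: arn:aws:iam::111122223333:role/MyGuardHookRole
      TargetOperations:
        - STACK
        - CHANGE_SET
      RuleLocation:
        Uri: s3://amzn-s3-demo-bucket/rules/my-rules.guard
      LogBucket: amzn-s3-demo-bucket  # optional detailed Guard output report
      StackFilters:
        FilteringCriteria: ALL
        StackNames:
          Include:
            - my-production-stack
```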
+ }, + "AWS::CloudFormation::GuardHook StackNames": { + "Exclude": "The stack names to exclude. All stacks except those listed here will invoke the Hook.", + "Include": "The stack names to include. Only the stacks specified in this list will invoke the Hook." + }, + "AWS::CloudFormation::GuardHook StackRoles": { + "Exclude": "The IAM role ARNs for stacks you want to exclude. The Hook will be invoked on all stacks except those initiated by the specified roles.", + "Include": "The IAM role ARNs to target stacks associated with these roles. Only stack operations initiated by these roles will invoke the Hook." + }, + "AWS::CloudFormation::GuardHook TargetFilters": { + "TargetFiltersItems": "The specific resource types, actions, and invocation points to target." + }, + "AWS::CloudFormation::GuardHook TargetFiltersItems": { + "Actions": "The actions to target. For `CHANGE_SET` Hook targets, you can only target `CREATE` actions.\n\nValid values: `CREATE` | `UPDATE` | `DELETE`", + "InvocationPoints": "The invocation points to target. The only valid value is `PRE_PROVISION` .", + "TargetNames": "The resource types to target, such as `AWS::S3::Bucket` or `AWS::DynamoDB::Table` ." + }, "AWS::CloudFormation::HookDefaultVersion": { - "TypeName": "The name of the hook.\n\nYou must specify either `TypeVersionArn` , or `TypeName` and `VersionId` .", + "TypeName": "The name of the Hook.\n\nYou must specify either `TypeVersionArn` , or `TypeName` and `VersionId` .", "TypeVersionArn": "The version ID of the type configuration.\n\nYou must specify either `TypeVersionArn` , or `TypeName` and `VersionId` .", "VersionId": "The version ID of the type specified.\n\nYou must specify either `TypeVersionArn` , or `TypeName` and `VersionId` ." }, "AWS::CloudFormation::HookTypeConfig": { - "Configuration": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", - "ConfigurationAlias": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. Hook types currently support default configuration alias.", - "TypeArn": "The Amazon Resource Number (ARN) for the hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", - "TypeName": "The unique name for your hook. Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` ." + "Configuration": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", + "ConfigurationAlias": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. Hook types currently support only the `default` configuration alias.", + "TypeArn": "The Amazon Resource Name (ARN) for the Hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", + "TypeName": "The unique name for your Hook. Specifies a three-part namespace for your Hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` ."
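For comparison, activating the configuration of an already-registered Hook with `HookTypeConfig` might look like the following sketch. The type name is hypothetical, and the `Configuration` payload assumes the common `CloudFormationConfiguration`/`HookConfiguration` envelope:

```json
{
  "Resources": {
    "MyHookTypeConfig": {
      "Type": "AWS::CloudFormation::HookTypeConfig",
      "Properties": {
        "TypeName": "Private::Guard::MyTestHook",
        "Configuration": "{\"CloudFormationConfiguration\":{\"HookConfiguration\":{\"TargetStacks\":\"ALL\",\"FailureMode\":\"WARN\",\"Properties\":{}}}}"
      }
    }
  }
}
```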
}, "AWS::CloudFormation::HookVersion": { - "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the task execution role that grants the hook permission.", + "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the task execution role that grants the Hook permission.", "LoggingConfig": "Contains logging configuration information for an extension.", - "SchemaHandlerPackage": "A URL to the Amazon S3 bucket containing the hook project package that contains the necessary files for the hook you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide for Extension Development* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That's, the user must have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "SchemaHandlerPackage": "A URL to the Amazon S3 bucket containing the Hook project package that contains the necessary files for the Hook you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide for Extension Development* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That's, the user must have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", "TypeName": "The unique name for your hook. Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\n> The following organization namespaces are reserved and can't be used in your hook type names:\n> \n> - `Alexa`\n> - `AMZN`\n> - `Amazon`\n> - `ASK`\n> - `AWS`\n> - `Custom`\n> - `Dev`" }, "AWS::CloudFormation::HookVersion LoggingConfig": { "LogGroupName": "The Amazon CloudWatch Logs group to which CloudFormation sends error logging information when invoking the extension's handlers.", "LogRoleArn": "The Amazon Resource Name (ARN) of the role that CloudFormation should assume when sending log entries to CloudWatch Logs." }, + "AWS::CloudFormation::LambdaHook": { + "Alias": "The type name alias for the Hook. This alias must be unique per account and Region.\n\nThe alias must be in the form `Name1::Name2::Name3` and must not begin with `AWS` . For example, `Private::Lambda::MyTestHook` .", + "ExecutionRole": "The IAM role that the Hook assumes to invoke your Lambda function.", + "FailureMode": "Specifies how the Hook responds when the Lambda function invoked by the Hook returns a `FAILED` response.\n\n- `FAIL` : Prevents the action from proceeding. This is helpful for enforcing strict compliance or security policies.\n- `WARN` : Issues warnings to users but allows actions to continue. 
This is useful for non-critical validations or informational checks.", + "HookStatus": "Specifies if the Hook is `ENABLED` or `DISABLED` .", + "LambdaFunction": "Specifies the Lambda function for the Hook. You can use:\n\n- The full Amazon Resource Name (ARN) without a suffix.\n- A qualified ARN with a version or alias suffix.", + "StackFilters": "Specifies the stack level filters for the Hook.", + "TargetFilters": "Specifies the target filters for the Hook.", + "TargetOperations": "Specifies which type of operation the Hook is run against.\n\nValid values: `STACK` | `RESOURCE` | `CHANGE_SET` | `CLOUD_CONTROL`" + }, + "AWS::CloudFormation::LambdaHook StackFilters": { + "FilteringCriteria": "The filtering criteria.\n\n- All stack names and stack roles ( `All` ): The Hook will only be invoked when all specified filters match.\n- Any stack names and stack roles ( `Any` ): The Hook will be invoked if at least one of the specified filters matches.", + "StackNames": "Includes or excludes specific stacks from Hook invocations.", + "StackRoles": "Includes or excludes specific stacks from Hook invocations based on their associated IAM roles." + }, + "AWS::CloudFormation::LambdaHook StackNames": { + "Exclude": "The stack names to exclude. All stacks except those listed here will invoke the Hook.", + "Include": "The stack names to include. Only the stacks specified in this list will invoke the Hook." + }, + "AWS::CloudFormation::LambdaHook StackRoles": { + "Exclude": "The IAM role ARNs for stacks you want to exclude. The Hook will be invoked on all stacks except those initiated by the specified roles.", + "Include": "The IAM role ARNs to target stacks associated with these roles. Only stack operations initiated by these roles will invoke the Hook." + }, + "AWS::CloudFormation::LambdaHook TargetFilters": { + "TargetFiltersItems": "The specific resource types, actions, and invocation points to target." + }, + "AWS::CloudFormation::LambdaHook TargetFiltersItems": { + "Actions": "The actions to target.", + "InvocationPoints": "The invocation points to target.", + "TargetNames": "The resource types to target, such as `AWS::S3::Bucket` or `AWS::DynamoDB::Table` ." + }, "AWS::CloudFormation::Macro": { "Description": "A description of the macro.", "FunctionName": "The Amazon Resource Name (ARN) of the underlying AWS Lambda function that you want AWS CloudFormation to invoke when the macro is run.", @@ -6776,7 +6892,7 @@ "OriginAccessIdentity": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront .\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead." }, "AWS::CloudFront::Distribution Logging": { - "Bucket": "The Amazon S3 bucket to store the access logs in, for example, `myawslogbucket.s3.amazonaws.com` .", + "Bucket": "The Amazon S3 bucket to store the access logs in, for example, `amzn-s3-demo-bucket.s3.amazonaws.com` .", "IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs. To include cookies, specify `true` for `IncludeCookies` . If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. 
If you don't want to include cookies when you create a distribution or if you want to disable including cookies for an existing distribution, specify `false` for `IncludeCookies` .", "Prefix": "An optional string that you want CloudFront to prefix to the access log `filenames` for this distribution, for example, `myprefix/` . If you want to enable logging, but you don't want to specify a prefix, you still must include an empty `Prefix` element in the `Logging` element." }, @@ -7031,7 +7147,7 @@ "Tags": "A complex type that contains zero or more `Tag` elements." }, "AWS::CloudFront::StreamingDistribution Logging": { - "Bucket": "The Amazon S3 bucket to store the access logs in, for example, `myawslogbucket.s3.amazonaws.com` .", + "Bucket": "The Amazon S3 bucket to store the access logs in, for example, `amzn-s3-demo-bucket.s3.amazonaws.com` .", "Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify `false` for `Enabled` , and specify empty `Bucket` and `Prefix` elements. If you specify `false` for `Enabled` but you specify values for `Bucket` and `Prefix` , the values are automatically deleted.", "Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, `myprefix/` . If you want to enable logging, but you don't want to specify a prefix, you still must include an empty `Prefix` element in the `Logging` element." }, @@ -7093,7 +7209,7 @@ "AWS::CloudTrail::EventDataStore AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. 
You can use any operator with `vpcEndpointId` .", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. 
This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -7135,7 +7251,7 @@ "AWS::CloudTrail::Trail AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . 
The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . 
`errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -7370,19 +7486,46 @@ }, "AWS::CodeBuild::Fleet": { "BaseCapacity": "The initial number of machines allocated to the compute \ufb02eet, which de\ufb01nes the number of builds that can run in parallel.", + "ComputeConfiguration": "The compute configuration of the compute fleet. This is only required if `computeType` is set to `ATTRIBUTE_BASED_COMPUTE` .", "ComputeType": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `ATTRIBUTE_BASED_COMPUTE` : Specify the amount of vCPUs, memory, disk space, and the type of machine.\n\n> If you use `ATTRIBUTE_BASED_COMPUTE` , you must define your attributes by using `computeConfiguration` . 
AWS CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see [Reserved capacity environment types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment-reserved-capacity.types) in the *AWS CodeBuild User Guide* .\n- `BUILD_GENERAL1_SMALL` : Use up to 4 GiB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 8 GiB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n- `BUILD_LAMBDA_1GB` : Use up to 1 GiB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_2GB` : Use up to 2 GiB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_4GB` : Use up to 4 GiB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_8GB` : Use up to 8 GiB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_10GB` : Use up to 10 GiB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 4 GiB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 16 GiB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [On-demand environment types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment.types) in the *AWS CodeBuild User Guide.*", - "EnvironmentType": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `MAC_ARM` is available only in regions US East (Ohio), US East (N. 
Virginia), US West (Oregon), Europe (Frankfurt), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "EnvironmentType": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `ARM_EC2` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_EC2` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `MAC_ARM` is available only in regions US East (Ohio), US East (N. Virginia), US West (Oregon), Europe (Frankfurt), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_EC2` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "FleetProxyConfiguration": "Information about the proxy configurations that apply network access control to your reserved capacity instances.", "FleetServiceRole": "The service role associated with the compute fleet. For more information, see [Allow a user to add a permission policy for a fleet service role](https://docs.aws.amazon.com/codebuild/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#customer-managed-policies-example-permission-policy-fleet-service-role.html) in the *AWS CodeBuild User Guide* .", "FleetVpcConfig": "Information about the VPC configuration that AWS CodeBuild accesses.", "ImageId": "The Amazon Machine Image (AMI) of the compute fleet.", "Name": "The name of the compute fleet.", "OverflowBehavior": "The compute fleet overflow behavior.\n\n- For overflow behavior `QUEUE` , your overflow builds need to wait on the existing fleet instance to become available.\n- For overflow behavior `ON_DEMAND` , your overflow builds run on CodeBuild on-demand.\n\n> If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see [Example policy statement to allow CodeBuild access to AWS services required to create a VPC network interface](https://docs.aws.amazon.com/codebuild/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#customer-managed-policies-example-create-vpc-network-interface) .", + "ScalingConfiguration": "The scaling configuration of the compute fleet.", "Tags": "A list of tag key and value pairs associated with this compute fleet.\n\nThese tags are available for use by AWS services that support AWS CodeBuild compute fleet tags." }, + "AWS::CodeBuild::Fleet ComputeConfiguration": { + "disk": "The amount of disk space of the instance type included in your fleet.", + "machineType": "The machine type of the instance type included in your fleet.", + "memory": "The amount of memory of the instance type included in your fleet.", + "vCpu": "The number of vCPUs of the instance type included in your fleet." + }, + "AWS::CodeBuild::Fleet FleetProxyRule": { + "Effect": "The behavior of the proxy rule.", + "Entities": "The destination of the proxy rule.", + "Type": "The type of proxy rule." + }, + "AWS::CodeBuild::Fleet ProxyConfiguration": { + "DefaultBehavior": "The default behavior of outgoing traffic.", + "OrderedProxyRules": "An array of `FleetProxyRule` objects that represent the specified destination domains or IPs to allow or deny network access control to." + }, + "AWS::CodeBuild::Fleet ScalingConfigurationInput": { + "MaxCapacity": "The maximum number of instances in the \ufb02eet when auto-scaling.", + "ScalingType": "The scaling type for a compute fleet.", + "TargetTrackingScalingConfigs": "A list of `TargetTrackingScalingConfiguration` objects." + }, "AWS::CodeBuild::Fleet Tag": { "Key": "The tag's key.", "Value": "The tag's value." 
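As a rough sketch of how the new fleet properties fit together, the fragment below pairs `ATTRIBUTE_BASED_COMPUTE` with a `ComputeConfiguration` and a target-tracking `ScalingConfiguration` (whose `TargetTrackingScalingConfiguration` entries are described next). All values are illustrative, and enum spellings such as `TARGET_TRACKING_SCALING` and `FLEET_UTILIZATION_RATE` are assumptions based on the descriptions above:

```json
{
  "Resources": {
    "MyReservedFleet": {
      "Type": "AWS::CodeBuild::Fleet",
      "Properties": {
        "Name": "my-reserved-fleet",
        "BaseCapacity": 2,
        "EnvironmentType": "LINUX_CONTAINER",
        "ComputeType": "ATTRIBUTE_BASED_COMPUTE",
        "ComputeConfiguration": {
          "vCpu": 4,
          "memory": 8,
          "disk": 128,
          "machineType": "GENERAL"
        },
        "OverflowBehavior": "QUEUE",
        "ScalingConfiguration": {
          "MaxCapacity": 10,
          "ScalingType": "TARGET_TRACKING_SCALING",
          "TargetTrackingScalingConfigs": [
            { "MetricType": "FLEET_UTILIZATION_RATE", "TargetValue": 70 }
          ]
        }
      }
    }
  }
}
```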
}, + "AWS::CodeBuild::Fleet TargetTrackingScalingConfiguration": { + "MetricType": "The metric type to determine auto-scaling.", + "TargetValue": "The value of `metricType` when to start scaling." + }, "AWS::CodeBuild::Fleet VpcConfig": { "SecurityGroupIds": "A list of one or more security groups IDs in your Amazon VPC.", "Subnets": "A list of one or more subnet IDs in your Amazon VPC.", @@ -7513,7 +7656,7 @@ "GitSubmodulesConfig": "Information about the Git submodules configuration for the build project.", "InsecureSsl": "This is used with GitHub Enterprise only. Set to true to ignore SSL warnings while connecting to your GitHub Enterprise project repository. The default value is `false` . `InsecureSsl` should be used for testing purposes only. It should not be used in a production environment.", "Location": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in an GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeConnections *Create GitLab connection* page, choose *Connect to GitLab* . (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. 
When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", - "ReportBuildStatus": "Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an `invalidInputException` is thrown.", + "ReportBuildStatus": "Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an `invalidInputException` is thrown.", "SourceIdentifier": "An identifier for this project source. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.", "Type": "The type of repository that contains the source code to be built. Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in an CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `GITLAB` : The source code is in a GitLab repository.\n- `GITLAB_SELF_MANAGED` : The source code is in a self-managed GitLab repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket." }, @@ -8845,6 +8988,7 @@ "AWS::Connect::HoursOfOperation": { "Config": "Configuration information for the hours of operation.", "Description": "The description for the hours of operation.", + "HoursOfOperationOverrides": "", "InstanceArn": "The Amazon Resource Name (ARN) of the instance.", "Name": "The name for the hours of operation.", "Tags": "The tags used to organize, track, or control access for this resource. For example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.", @@ -8855,10 +8999,27 @@ "EndTime": "The end time that your contact center closes.", "StartTime": "The start time that your contact center opens." }, + "AWS::Connect::HoursOfOperation HoursOfOperationOverride": { + "EffectiveFrom": "", + "EffectiveTill": "", + "HoursOfOperationOverrideId": "", + "OverrideConfig": "", + "OverrideDescription": "", + "OverrideName": "" + }, + "AWS::Connect::HoursOfOperation HoursOfOperationOverrideConfig": { + "Day": "", + "EndTime": "", + "StartTime": "" + }, "AWS::Connect::HoursOfOperation HoursOfOperationTimeSlice": { "Hours": "The hours.", "Minutes": "The minutes." }, + "AWS::Connect::HoursOfOperation OverrideTimeSlice": { + "Hours": "", + "Minutes": "" + }, "AWS::Connect::HoursOfOperation Tag": { "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. 
You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -", "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -" @@ -9308,8 +9469,8 @@ "DialingCapacity": "The allocation of dialing capacity between multiple active campaigns." }, "AWS::ConnectCampaigns::Campaign Tag": { - "Key": "", - "Value": "" + "Key": "The tag keys.", + "Value": "The value of the tag." }, "AWS::ControlTower::EnabledBaseline": { "BaselineIdentifier": "The specific `Baseline` enabled as part of the `EnabledBaseline` resource.", @@ -9467,6 +9628,7 @@ }, "AWS::CustomerProfiles::Integration": { "DomainName": "The unique name of the domain.", + "EventTriggerNames": "", "FlowDefinition": "The configuration that controls how Customer Profiles retrieves data from the source.", "ObjectTypeName": "The name of the profile object type mapping to use.", "ObjectTypeNames": "The object type mapping.", @@ -9591,6 +9753,95 @@ "Key": "A string you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources.", "Value": "The value for the specified tag key." }, + "AWS::CustomerProfiles::SegmentDefinition": { + "Description": "The description of the segment definition.", + "DisplayName": "Display name of the segment definition.", + "DomainName": "The name of the domain.", + "SegmentDefinitionName": "Name of the segment definition.", + "SegmentGroups": "Contains all groups of the segment definition.", + "Tags": "The tags belonging to the segment definition." + }, + "AWS::CustomerProfiles::SegmentDefinition AddressDimension": { + "City": "The city belonging to the address.", + "Country": "The country belonging to the address.", + "County": "The county belonging to the address.", + "PostalCode": "The postal code belonging to the address.", + "Province": "The province belonging to the address.", + "State": "The state belonging to the address." + }, + "AWS::CustomerProfiles::SegmentDefinition AttributeDimension": { + "DimensionType": "The action to segment with.", + "Values": "The values to apply the DimensionType on." + }, + "AWS::CustomerProfiles::SegmentDefinition CalculatedAttributeDimension": { + "ConditionOverrides": "Applies the given condition over the initial Calculated Attribute's definition.", + "DimensionType": "The action to segment with.", + "Values": "The values to apply the DimensionType with." + }, + "AWS::CustomerProfiles::SegmentDefinition ConditionOverrides": { + "Range": "The relative time period over which data is included in the aggregation for this override." + }, + "AWS::CustomerProfiles::SegmentDefinition DateDimension": { + "DimensionType": "The action to segment on.", + "Values": "The values to apply the DimensionType on." + }, + "AWS::CustomerProfiles::SegmentDefinition Dimension": { + "CalculatedAttributes": "Object that holds the calculated attributes to segment on.", + "ProfileAttributes": "Object that holds the profile attributes to segment on." + }, + "AWS::CustomerProfiles::SegmentDefinition ExtraLengthValueProfileDimension": { + "DimensionType": "The action to segment with.", + "Values": "The values to apply the DimensionType on." 
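The segment definition types being added here nest as groups of dimensions; the `Group`, `ProfileAttributes`, and `ProfileDimension` entries described next fill in the leaves. A hypothetical sketch, where the domain and segment names are placeholders and enum values such as `INCLUSIVE` and `ALL` are assumptions:

```json
{
  "Resources": {
    "USCustomersSegment": {
      "Type": "AWS::CustomerProfiles::SegmentDefinition",
      "Properties": {
        "DomainName": "MyDomain",
        "SegmentDefinitionName": "USCustomers",
        "DisplayName": "US customers",
        "SegmentGroups": {
          "Include": "ALL",
          "Groups": [
            {
              "Type": "ALL",
              "SourceType": "ALL",
              "Dimensions": [
                {
                  "ProfileAttributes": {
                    "Address": {
                      "Country": { "DimensionType": "INCLUSIVE", "Values": ["US"] }
                    }
                  }
                }
              ]
            }
          ]
        }
      }
    }
  }
}
```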
+ }, + "AWS::CustomerProfiles::SegmentDefinition Group": { + "Dimensions": "Defines the attributes to segment on.", + "SourceSegments": "Defines the starting source of data.", + "SourceType": "Defines how to interact with the source data.", + "Type": "Defines how to interact with the profiles found in the current filtering." + }, + "AWS::CustomerProfiles::SegmentDefinition ProfileAttributes": { + "AccountNumber": "A field to describe values to segment on within account number.", + "AdditionalInformation": "A field to describe values to segment on within additional information.", + "Address": "A field to describe values to segment on within address.", + "Attributes": "A field to describe values to segment on within attributes.", + "BillingAddress": "A field to describe values to segment on within billing address.", + "BirthDate": "A field to describe values to segment on within birthDate.", + "BusinessEmailAddress": "A field to describe values to segment on within business email address.", + "BusinessName": "A field to describe values to segment on within business name.", + "BusinessPhoneNumber": "A field to describe values to segment on within business phone number.", + "EmailAddress": "A field to describe values to segment on within email address.", + "FirstName": "A field to describe values to segment on within first name.", + "GenderString": "A field to describe values to segment on within genderString.", + "HomePhoneNumber": "A field to describe values to segment on within home phone number.", + "LastName": "A field to describe values to segment on within last name.", + "MailingAddress": "A field to describe values to segment on within mailing address.", + "MiddleName": "A field to describe values to segment on within middle name.", + "MobilePhoneNumber": "A field to describe values to segment on within mobile phone number.", + "PartyTypeString": "A field to describe values to segment on within partyTypeString.", + "PersonalEmailAddress": "A field to describe values to segment on within personal email address.", + "PhoneNumber": "A field to describe values to segment on within phone number.", + "ShippingAddress": "A field to describe values to segment on within shipping address." + }, + "AWS::CustomerProfiles::SegmentDefinition ProfileDimension": { + "DimensionType": "The action to segment on.", + "Values": "" + }, + "AWS::CustomerProfiles::SegmentDefinition RangeOverride": { + "End": "The end time of when to include objects.", + "Start": "The start time of when to include objects.", + "Unit": "The unit for start and end." + }, + "AWS::CustomerProfiles::SegmentDefinition SegmentGroup": { + "Groups": "Holds the list of groups within the segment definition.", + "Include": "Defines whether to include or exclude the profiles that fit the segment criteria." + }, + "AWS::CustomerProfiles::SegmentDefinition SourceSegment": { + "SegmentDefinitionName": "The name of the source segment." + }, + "AWS::CustomerProfiles::SegmentDefinition Tag": { + "Key": "", + "Value": "One part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key). The value can be empty or null." + }, "AWS::DAX::Cluster": { "AvailabilityZones": "The Availability Zones (AZs) in which the cluster nodes will reside after the cluster has been created or updated. If provided, the length of this list must equal the `ReplicationFactor` parameter. 
If you omit this parameter, DAX will spread the nodes across Availability Zones for the highest availability.",
     "ClusterEndpointEncryptionType": "The encryption type of the cluster's endpoint. Available values are:\n\n- `NONE` - The cluster's endpoint will be unencrypted.\n- `TLS` - The cluster's endpoint will be encrypted with Transport Layer Security, and will provide an x509 certificate for authentication.\n\nThe default value is `NONE` .",
@@ -11070,10 +11321,10 @@
     "Type": "The search filter expression type."
   },
   "AWS::DataZone::DataSource FormInput": {
-    "Content": "The content of the metadata form.",
-    "FormName": "The name of the metadata form.",
-    "TypeIdentifier": "The ID of the metadata form type.",
-    "TypeRevision": "The revision of the metadata form type."
+    "Content": "",
+    "FormName": "",
+    "TypeIdentifier": "",
+    "TypeRevision": ""
   },
   "AWS::DataZone::DataSource GlueRunConfigurationInput": {
     "AutoImportDataQualityResult": "Specifies whether to automatically import data quality metrics as part of the data source run.",
@@ -11254,9 +11505,17 @@
     "RoleArn": "The IAM role that workers in the fleet use when processing jobs.",
     "Tags": "The tags to add to your fleet. Each tag consists of a tag key and a tag value. Tag keys and values are both required, but tag values can be empty strings."
   },
+  "AWS::Deadline::Fleet AcceleratorCapabilities": {
+    "Count": "The number of GPU accelerators specified for worker hosts in this fleet.",
+    "Selections": "A list of accelerator capabilities requested for this fleet. Only Amazon Elastic Compute Cloud instances that provide these capabilities will be used. For example, if you specify both L4 and T4 chips, Deadline Cloud will use Amazon EC2 instances that have either the L4 or the T4 chip installed."
+  },
   "AWS::Deadline::Fleet AcceleratorCountRange": {
-    "Max": "The maximum GPU for the accelerator.",
-    "Min": "The minimum GPU for the accelerator."
+    "Max": "The maximum number of GPU accelerators in the worker host.",
+    "Min": "The minimum number of GPU accelerators in the worker host."
+  },
+  "AWS::Deadline::Fleet AcceleratorSelection": {
+    "Name": "The name of the chip used by the GPU accelerator.\n\nIf you specify `l4` as the name of the accelerator, you must specify `latest` or `grid:r550` as the runtime.\n\nThe available GPU accelerators are:\n\n- `t4` - NVIDIA T4 Tensor Core GPU\n- `a10g` - NVIDIA A10G Tensor Core GPU\n- `l4` - NVIDIA L4 Tensor Core GPU\n- `l40s` - NVIDIA L40S Tensor Core GPU",
+    "Runtime": "Specifies the runtime driver to use for the GPU accelerator. You must use the same runtime for all GPUs.\n\nYou can choose from the following runtimes:\n\n- `latest` - Use the latest runtime available for the chip. If you specify `latest` and a new version of the runtime is released, the new version of the runtime is used.\n- `grid:r550` - [NVIDIA vGPU software 17](https://docs.aws.amazon.com/https://docs.nvidia.com/vgpu/17.0/index.html)\n- `grid:r535` - [NVIDIA vGPU software 16](https://docs.aws.amazon.com/https://docs.nvidia.com/vgpu/16.0/index.html)\n\nIf you don't specify a runtime, Deadline Cloud uses `latest` as the default. However, if you have multiple accelerators and specify `latest` for some and leave others blank, Deadline Cloud raises an exception."
+  },
   "AWS::Deadline::Fleet AcceleratorTotalMemoryMiBRange": {
     "Max": "The maximum amount of memory to use for the accelerator, measured in MiB.",
     "Min": "The minimum amount of memory to use for the accelerator, measured in MiB."
   },
@@ -11309,6 +11568,7 @@
     "InstanceMarketOptions": "The Amazon EC2 market type."
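A minimal sketch of how the new Deadline Cloud accelerator properties might be combined, following the rules in the descriptions above (one consistent `Runtime` across all selections, and `l4` paired with `latest`); the chip mix and counts are illustrative assumptions.

```yaml
# Hypothetical fragment of a service-managed fleet's instance capabilities.
# Property names follow the descriptions above; chip names and counts are
# illustrative. Note the single consistent Runtime across selections.
AcceleratorCapabilities:
  Selections:
    - Name: l4        # per the docs, l4 requires runtime latest or grid:r550
      Runtime: latest
    - Name: t4
      Runtime: latest # same runtime for all GPUs, as the docs require
  Count:
    Min: 1
    Max: 2
```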
}, "AWS::Deadline::Fleet ServiceManagedEc2InstanceCapabilities": { + "AcceleratorCapabilities": "Describes the GPU accelerator capabilities required for worker host instances in this fleet.", "AllowedInstanceTypes": "The allowable Amazon EC2 instance types.", "CpuArchitectureType": "The CPU architecture type.", "CustomAmounts": "The custom capability amounts to require for instances in this fleet.", @@ -11674,6 +11934,7 @@ "StreamSpecification": "Specifies the streams settings on your global table. You must provide a value for this property if your global table contains more than one replica. You can only change the streams settings if your global table has only one replica.", "TableName": "A name for the global table. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID as the table name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\n> If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "TimeToLiveSpecification": "Specifies the time to live (TTL) settings for the table. This setting will be applied to all replicas.", + "WarmThroughput": "Provides visibility into the number of read and write operations your table or secondary index can instantaneously support. The settings can be modified using the `UpdateTable` operation to meet the throughput requirements of an upcoming peak event.", "WriteOnDemandThroughputSettings": "Sets the write request settings for a global table or a global secondary index. You can only specify this setting if your resource uses the `PAY_PER_REQUEST` `BillingMode` .", "WriteProvisionedThroughputSettings": "Specifies an auto scaling policy for write capacity. This policy will be applied to all replicas. This setting must be specified if `BillingMode` is set to `PROVISIONED` ." }, @@ -11694,6 +11955,7 @@ "IndexName": "The name of the global secondary index. The name must be unique among all other indexes on this table.", "KeySchema": "The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types:\n\n- `HASH` - partition key\n- `RANGE` - sort key\n\n> The partition key of an item is also known as its *hash attribute* . The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n> \n> The sort key of an item is also known as its *range attribute* . The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.", "Projection": "Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.", + "WarmThroughput": "Represents the warm throughput value (in read units per second and write units per second) for the specified secondary index. If you use this parameter, you must specify `ReadUnitsPerSecond` , `WriteUnitsPerSecond` , or both.", "WriteOnDemandThroughputSettings": "Sets the write request settings for a global table or a global secondary index. 
You can only specify this setting if your resource uses the `PAY_PER_REQUEST` `BillingMode` .", "WriteProvisionedThroughputSettings": "Defines write capacity settings for the global secondary index. You must specify a value for this property if the table's `BillingMode` is `PROVISIONED` . All replicas will have the same write capacity settings for this global secondary index." }, @@ -11775,6 +12037,10 @@ "AttributeName": "The name of the attribute used to store the expiration time for items in the table.\n\nCurrently, you cannot directly change the attribute name used to evaluate time to live. In order to do so, you must first disable time to live, and then re-enable it with the new attribute name. It can take up to one hour for changes to time to live to take effect. If you attempt to modify time to live within that time window, your stack operation might be delayed.", "Enabled": "Indicates whether TTL is to be enabled (true) or disabled (false) on the table." }, + "AWS::DynamoDB::GlobalTable WarmThroughput": { + "ReadUnitsPerSecond": "Represents the number of read operations your base table can instantaneously support.", + "WriteUnitsPerSecond": "Represents the number of write operations your base table can instantaneously support." + }, "AWS::DynamoDB::GlobalTable WriteOnDemandThroughputSettings": { "MaxWriteRequestUnits": "Maximum number of write request settings for the specified replica of a global table." }, @@ -11787,7 +12053,7 @@ "ContributorInsightsSpecification": "The settings used to enable or disable CloudWatch Contributor Insights for the specified table.", "DeletionProtectionEnabled": "Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see [Using deletion protection](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.Basics.html#WorkingWithTables.Basics.DeletionProtection) in the *Amazon DynamoDB Developer Guide* .", "GlobalSecondaryIndexes": "Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes.\n\n> If you update a table to include a new global secondary index, AWS CloudFormation initiates the index creation and then proceeds with the stack update. AWS CloudFormation doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is `ACTIVE` . You can track its status by using the DynamoDB [DescribeTable](https://docs.aws.amazon.com/cli/latest/reference/dynamodb/describe-table.html) command.\n> \n> If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index.\n> \n> Updates are not supported. The following are exceptions:\n> \n> - If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption.\n> - You can delete or add one global secondary index without interruption. 
If you do both in the same update (for example, by changing the index's logical ID), the update fails.",
-    "ImportSourceSpecification": "Specifies the properties of data being imported from the S3 bucket source to the table.\n\n> If you specify the `ImportSourceSpecification` property, and also specify either the `StreamSpecification` , the `TableClass` property, or the `DeletionProtectionEnabled` property, the IAM entity creating/updating stack must have `UpdateTable` permission.",
+    "ImportSourceSpecification": "Specifies the properties of data being imported from the S3 bucket source to the table.\n\n> If you specify the `ImportSourceSpecification` property, and also specify either the `StreamSpecification` , the `TableClass` property, the `DeletionProtectionEnabled` property, or the `WarmThroughput` property, the IAM entity creating/updating stack must have `UpdateTable` permission.",
     "KeySchema": "Specifies the attributes that make up the primary key for the table. The attributes in the `KeySchema` property must also be defined in the `AttributeDefinitions` property.",
     "KinesisStreamSpecification": "The Kinesis Data Streams configuration for the specified table.",
     "LocalSecondaryIndexes": "Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.",
@@ -11800,7 +12066,8 @@
     "TableClass": "The table class of the new table. Valid values are `STANDARD` and `STANDARD_INFREQUENT_ACCESS` .",
     "TableName": "A name for the table. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the table name. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\n> If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.",
     "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .",
-    "TimeToLiveSpecification": "Specifies the Time to Live (TTL) settings for the table.\n\n> For detailed information about the limits in DynamoDB, see [Limits in Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the Amazon DynamoDB Developer Guide."
+    "TimeToLiveSpecification": "Specifies the Time to Live (TTL) settings for the table.\n\n> For detailed information about the limits in DynamoDB, see [Limits in Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the Amazon DynamoDB Developer Guide.",
+    "WarmThroughput": "Represents the warm throughput (in read units per second and write units per second) for creating a table."
   },
   "AWS::DynamoDB::Table AttributeDefinition": {
     "AttributeName": "A name for the attribute.",
@@ -11819,7 +12086,8 @@
     "KeySchema": "The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types:\n\n- `HASH` - partition key\n- `RANGE` - sort key\n\n> The partition key of an item is also known as its *hash attribute* . 
The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n> \n> The sort key of an item is also known as its *range attribute* . The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.", "OnDemandThroughput": "The maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify `MaxReadRequestUnits` , `MaxWriteRequestUnits` , or both.", "Projection": "Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.", - "ProvisionedThroughput": "Represents the provisioned throughput settings for the specified global secondary index.\n\nFor current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the *Amazon DynamoDB Developer Guide* ." + "ProvisionedThroughput": "Represents the provisioned throughput settings for the specified global secondary index.\n\nFor current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the *Amazon DynamoDB Developer Guide* .", + "WarmThroughput": "Represents the warm throughput value (in read units per second and write units per second) for the specified secondary index. If you use this parameter, you must specify `ReadUnitsPerSecond` , `WriteUnitsPerSecond` , or both." }, "AWS::DynamoDB::Table ImportSourceSpecification": { "InputCompressionType": "Type of compression to be used on the input coming from the imported table.", @@ -11883,6 +12151,10 @@ "AttributeName": "The name of the TTL attribute used to store the expiration time for items in the table.\n\n> - The `AttributeName` property is required when enabling the TTL, or when TTL is already enabled.\n> - To update this property, you must first disable TTL and then enable TTL with the new attribute name.", "Enabled": "Indicates whether TTL is to be enabled (true) or disabled (false) on the table." }, + "AWS::DynamoDB::Table WarmThroughput": { + "ReadUnitsPerSecond": "Represents the number of read operations your base table can instantaneously support.", + "WriteUnitsPerSecond": "Represents the number of write operations your base table can instantaneously support." + }, "AWS::EC2::CapacityReservation": { "AvailabilityZone": "The Availability Zone in which to create the Capacity Reservation.", "EbsOptimized": "Indicates whether the Capacity Reservation supports EBS-optimized instances. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS- optimized instance.", @@ -13130,7 +13402,7 @@ "AWS::EC2::PrefixList": { "AddressFamily": "The IP address type.\n\nValid Values: `IPv4` | `IPv6`", "Entries": "The entries for the prefix list.", - "MaxEntries": "The maximum number of entries for the prefix list.", + "MaxEntries": "The maximum number of entries for the prefix list. 
You can't modify the entries and the size of a prefix list at the same time.\n\nThis property is required when you create a prefix list.", "PrefixListName": "A name for the prefix list.\n\nConstraints: Up to 255 characters in length. The name cannot start with `com.amazonaws` .", "Tags": "The tags for the prefix list." }, @@ -14148,6 +14420,7 @@ "TaskSetId": "The short name or full Amazon Resource Name (ARN) of the task set to set as the primary task set in the deployment." }, "AWS::ECS::Service": { + "AvailabilityZoneRebalancing": "Indicates whether to use Availability Zone rebalancing for the service.\n\nFor more information, see [Balancing an Amazon ECS service across Availability Zones](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-rebalancing.html) in the *Amazon Elastic Container Service Developer Guide* .", "CapacityProviderStrategy": "The capacity provider strategy to use for the service.\n\nIf a `capacityProviderStrategy` is specified, the `launchType` parameter must be omitted. If no `capacityProviderStrategy` or `launchType` is specified, the `defaultCapacityProviderStrategy` for the cluster is used.\n\nA capacity provider strategy may contain a maximum of 6 capacity providers.", "Cluster": "The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. If you do not specify a cluster, the default cluster is assumed.", "DeploymentConfiguration": "Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.", @@ -14155,7 +14428,7 @@ "DesiredCount": "The number of instantiations of the specified task definition to place and keep running in your service.\n\nFor new services, if a desired count is not specified, a default value of `1` is used. When using the `DAEMON` scheduling strategy, the desired count is not required.\n\nFor existing services, if a desired count is not specified, it is omitted from the operation.", "EnableECSManagedTags": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see [Tagging your Amazon ECS resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nWhen you use Amazon ECS managed tags, you need to set the `propagateTags` request parameter.", "EnableExecuteCommand": "Determines whether the execute command functionality is turned on for the service. If `true` , the execute command functionality is turned on for all containers in tasks as part of the service.", - "HealthCheckGracePeriodSeconds": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of `0` is used.\n\nIf you do not use an Elastic Load Balancing, we recommend that you use the `startPeriod` in the task definition health check parameters. For more information, see [Health check](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HealthCheck.html) .\n\nIf your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). 
During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.", + "HealthCheckGracePeriodSeconds": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container health checks after a task has first started. If you don't specify a health check grace period value, the default value of `0` is used. If you don't use any of the health checks, then `healthCheckGracePeriodSeconds` is unused.\n\nIf your service's tasks take a while to start and respond to health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.", "LaunchType": "The launch type on which to run your service. For more information, see [Amazon ECS Launch Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) in the *Amazon Elastic Container Service Developer Guide* .", "LoadBalancers": "A list of load balancer objects to associate with the service. If you specify the `Role` property, `LoadBalancers` must be specified as well. For information about the number of load balancers that you can specify per service, see [Service Load Balancing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) in the *Amazon Elastic Container Service Developer Guide* .", "NetworkConfiguration": "The network configuration for the service. This parameter is required for task definitions that use the `awsvpc` network mode to receive their own elastic network interface, and it is not supported for other network modes. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .", @@ -14170,7 +14443,8 @@ "ServiceRegistries": "The details of the service discovery registry to associate with this service. For more information, see [Service discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) .\n\n> Each service may be associated with one service registry. Multiple service registries for each service isn't supported.", "Tags": "The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. 
You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", "TaskDefinition": "The `family` and `revision` ( `family:revision` ) or full ARN of the task definition to run in your service. If a `revision` isn't specified, the latest `ACTIVE` revision is used.\n\nA task definition must be specified if the service uses either the `ECS` or `CODE_DEPLOY` deployment controllers.\n\nFor more information about deployment types, see [Amazon ECS deployment types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) .", - "VolumeConfigurations": "The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume." + "VolumeConfigurations": "The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume.", + "VpcLatticeConfigurations": "The VPC Lattice configuration for the service being created." }, "AWS::ECS::Service AwsVpcConfiguration": { "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", @@ -14287,6 +14561,11 @@ "IdleTimeoutSeconds": "The amount of time in seconds a connection will stay active while idle. A value of `0` can be set to disable `idleTimeout` .\n\nThe `idleTimeout` default for `HTTP` / `HTTP2` / `GRPC` is 5 minutes.\n\nThe `idleTimeout` default for `TCP` is 1 hour.", "PerRequestTimeoutSeconds": "The amount of time waiting for the upstream to respond with a complete response per request. A value of `0` can be set to disable `perRequestTimeout` . `perRequestTimeout` can only be set if Service Connect `appProtocol` isn't `TCP` . Only `idleTimeout` is allowed for `TCP` `appProtocol` ." }, + "AWS::ECS::Service VpcLatticeConfiguration": { + "PortName": "The name of the port mapping to register in the VPC Lattice target group. This is the name of the `portMapping` you defined in your task definition.", + "RoleArn": "The ARN of the IAM role to associate with this VPC Lattice configuration. This is the Amazon ECS infrastructure IAM role that is used to manage your VPC Lattice infrastructure.", + "TargetGroupArn": "The full Amazon Resource Name (ARN) of the target group or groups associated with the VPC Lattice configuration that the Amazon ECS tasks will be registered to." + }, "AWS::ECS::TaskDefinition": { "ContainerDefinitions": "A list of container definitions in JSON format that describe the different containers that make up your task. For more information about container definition parameters and defaults, see [Amazon ECS Task Definitions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) in the *Amazon Elastic Container Service Developer Guide* .", "Cpu": "The number of `cpu` units used by the task. If you use the EC2 launch type, this field is optional. Any value can be used. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines your range of valid values for the `memory` parameter.\n\nIf you use the EC2 launch type, this field is optional. 
Supported values are between `128` CPU units ( `0.125` vCPUs) and `10240` CPU units ( `10` vCPUs).\n\nThe CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate.\n\n- 256 (.25 vCPU) - Available `memory` values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)\n- 512 (.5 vCPU) - Available `memory` values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)\n- 1024 (1 vCPU) - Available `memory` values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)\n- 2048 (2 vCPU) - Available `memory` values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)\n- 4096 (4 vCPU) - Available `memory` values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)\n- 8192 (8 vCPU) - Available `memory` values: 16 GB and 60 GB in 4 GB increments\n\nThis option requires Linux platform `1.4.0` or later.\n- 16384 (16vCPU) - Available `memory` values: 32GB and 120 GB in 8 GB increments\n\nThis option requires Linux platform `1.4.0` or later.", @@ -14350,6 +14629,7 @@ "SystemControls": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the docker container create command and the `--sysctl` option to docker run. For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "Ulimits": "A list of `ulimits` to set in the container. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Valid naming values are displayed in the [Ulimit](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Ulimit.html) data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", "User": "The user to use inside the container. This parameter maps to `User` in the docker container create command and the `--user` option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "VersionConsistency": "Specifies whether Amazon ECS will resolve the container image tag provided in the container definition to an image digest. By default, the value is `enabled` . If you set the value for a container as `disabled` , Amazon ECS will not resolve the provided container image tag to a digest and will use the original image URI specified in the container definition for deployment. 
For more information about container image resolution, see [Container image resolution](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html#deployment-container-image-stability) in the *Amazon ECS Developer Guide* .", "VolumesFrom": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the docker container create command and the `--volumes-from` option to docker run.", "WorkingDirectory": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the docker container create command and the `--workdir` option to docker run." }, @@ -14446,7 +14726,7 @@ "ContainerPort": "The port number on the container that's bound to the user-specified or automatically assigned host port.\n\nIf you use containers in a task with the `awsvpc` or `host` network mode, specify the exposed ports using `containerPort` .\n\nIf you use containers in a task with the `bridge` network mode and you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range. For more information, see `hostPort` . Port mappings that are automatically assigned in this way do not count toward the 100 reserved ports limit of a container instance.", "ContainerPortRange": "The port number range on the container that's bound to the dynamically mapped host port range.\n\nThe following rules apply when you specify a `containerPortRange` :\n\n- You must use either the `bridge` network mode or the `awsvpc` network mode.\n- This parameter is available for both the EC2 and AWS Fargate launch types.\n- This parameter is available for both the Linux and Windows operating systems.\n- The container instance must have at least version 1.67.0 of the container agent and at least version 1.67.0-1 of the `ecs-init` package\n- You can specify a maximum of 100 port ranges per container.\n- You do not specify a `hostPortRange` . The value of the `hostPortRange` is set as follows:\n\n- For containers in a task with the `awsvpc` network mode, the `hostPortRange` is set to the same value as the `containerPortRange` . 
This is a static mapping strategy.\n- For containers in a task with the `bridge` network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes it to docker to bind them to the container ports.\n- The `containerPortRange` valid values are between 1 and 65535.\n- A port can only be included in one port mapping per container.\n- You cannot specify overlapping port ranges.\n- The first port in the range must be less than last port in the range.\n- Docker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.\n\nFor more information, see [Issue #11185](https://docs.aws.amazon.com/https://github.com/moby/moby/issues/11185) on the Github website.\n\nFor information about how to turn off the docker-proxy in the Docker daemon config file, see [Docker daemon](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/bootstrap_container_instance.html#bootstrap_docker_daemon) in the *Amazon ECS Developer Guide* .\n\nYou can call [`DescribeTasks`](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html) to view the `hostPortRange` which are the host ports that are bound to the container ports.", "HostPort": "The port number on the container instance to reserve for your container.\n\nIf you specify a `containerPortRange` , leave this field empty and the value of the `hostPort` is set as follows:\n\n- For containers in a task with the `awsvpc` network mode, the `hostPort` is set to the same value as the `containerPort` . This is a static mapping strategy.\n- For containers in a task with the `bridge` network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy.\n\nIf you use containers in a task with the `awsvpc` or `host` network mode, the `hostPort` can either be left blank or set to the same value as the `containerPort` .\n\nIf you use containers in a task with the `bridge` network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the `hostPort` (or set it to `0` ) while specifying a `containerPort` and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.\n\nThe default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under `/proc/sys/net/ipv4/ip_local_port_range` . If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 (Linux) or 49152 through 65535 (Windows) is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.\n\nThe default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the `remainingResources` of [DescribeContainerInstances](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeContainerInstances.html) output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. 
Automatically assigned ports aren't included in the 100 reserved ports quota.", - "Name": "The name that's used for the port mapping. This parameter only applies to Service Connect. This parameter is the name that you use in the `serviceConnectConfiguration` of a service. The name can include up to 64 characters. The characters can include lowercase letters, numbers, underscores (_), and hyphens (-). The name can't start with a hyphen.\n\nFor more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", + "Name": "The name that's used for the port mapping. This parameter is the name that you use in the `serviceConnectConfiguration` and the `vpcLatticeConfigurations` of a service. The name can include up to 64 characters. The characters can include lowercase letters, numbers, underscores (_), and hyphens (-). The name can't start with a hyphen.", "Protocol": "The protocol used for the port mapping. Valid values are `tcp` and `udp` . The default is `tcp` . `protocol` is immutable in a Service Connect service. Updating this field requires a service deletion and redeployment." }, "AWS::ECS::TaskDefinition ProxyConfiguration": { @@ -14616,7 +14896,10 @@ "AvailabilityZoneName": "For One Zone file systems, the replication configuration must specify the Availability Zone in which the destination file system is located.\n\nUse the format `us-east-1a` to specify the Availability Zone. For more information about One Zone file systems, see [EFS file system types](https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) in the *Amazon EFS User Guide* .\n\n> One Zone file system type is not available in all Availability Zones in AWS Regions where Amazon EFS is available.", "FileSystemId": "The ID of the destination Amazon EFS file system.", "KmsKeyId": "The ID of an AWS KMS key used to protect the encrypted file system.", - "Region": "The AWS Region in which the destination file system is located.\n\n> For One Zone file systems, the replication configuration must specify the AWS Region in which the destination file system is located." + "Region": "The AWS Region in which the destination file system is located.\n\n> For One Zone file systems, the replication configuration must specify the AWS Region in which the destination file system is located.", + "RoleArn": "", + "Status": "", + "StatusMessage": "" }, "AWS::EFS::MountTarget": { "FileSystemId": "The ID of the file system for which to create the mount target.", @@ -14782,6 +15065,7 @@ "InstanceTypes": "Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the `amiType` parameter. If you specify `launchTemplate` , then you can specify zero or one instance type in your launch template *or* you can specify 0-20 instance types for `instanceTypes` . If however, you specify an instance type in your launch template *and* specify any `instanceTypes` , the node group deployment will fail. If you don't specify an instance type in a launch template or for `instanceTypes` , then `t3.medium` is used, by default. If you specify `Spot` for `capacityType` , then we recommend specifying multiple values for `instanceTypes` . 
For more information, see [Managed node group capacity types](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types) and [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "Labels": "The Kubernetes `labels` applied to the nodes in the node group.\n\n> Only `labels` that are applied with the Amazon EKS API are shown here. There may be other Kubernetes `labels` applied to the nodes in this group.", "LaunchTemplate": "An object representing a node group's launch template specification. When using this object, don't directly specify `instanceTypes` , `diskSize` , or `remoteAccess` . Make sure that the launch template meets the requirements in `launchTemplateSpecification` . Also refer to [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "NodeRepairConfig": "", "NodeRole": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node `kubelet` daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see [Amazon EKS node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) in the **Amazon EKS User Guide** . If you specify `launchTemplate` , then don't specify `[IamInstanceProfile](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "NodegroupName": "The unique name to give your node group.", "ReleaseVersion": "The AMI version of the Amazon EKS optimized AMI to use with your node group (for example, `1.14.7- *YYYYMMDD*` ). By default, the latest available AMI version for the node group's current Kubernetes version is used. For more information, see [Amazon EKS optimized Linux AMI Versions](https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) in the *Amazon EKS User Guide* .\n\n> Changing this value triggers an update of the node group if one is available. You can't update other properties at the same time as updating `Release Version` .", @@ -14798,6 +15082,9 @@ "Name": "The name of the launch template.\n\nYou must specify either the launch template name or the launch template ID in the request, but not both.", "Version": "The version number of the launch template to use. If no version is specified, then the template's default version is used." }, + "AWS::EKS::Nodegroup NodeRepairConfig": { + "Enabled": "" + }, "AWS::EKS::Nodegroup RemoteAccess": { "Ec2SshKey": "The Amazon EC2 SSH key name that provides access for SSH communication with the nodes in the managed node group. For more information, see [Amazon EC2 key pairs and Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the *Amazon Elastic Compute Cloud User Guide for Linux Instances* . For Windows, an Amazon EC2 SSH key is used to obtain the RDP password. 
For more information, see [Amazon EC2 key pairs and Windows instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-key-pairs.html) in the *Amazon Elastic Compute Cloud User Guide for Windows Instances* .", "SourceSecurityGroups": "The security group IDs that are allowed SSH access (port 22) to the nodes. For Windows, the port is 3389. If you specify an Amazon EC2 SSH key but don't specify a source security group when you create a managed node group, then the port on the nodes is opened to the internet ( `0.0.0.0/0` ). For more information, see [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) in the *Amazon Virtual Private Cloud User Guide* ." @@ -15943,6 +16230,7 @@ "Weight": "The weight. The range is 0 to 999." }, "AWS::ElasticLoadBalancingV2::LoadBalancer": { + "EnablePrefixForIpv6SourceNat": "[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be `dualstack` . The default value is `off` .", "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic": "Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through AWS PrivateLink .", "IpAddressType": "The IP address type. Internal load balancers must use `ipv4` .\n\n[Application Load Balancers] The possible values are `ipv4` (IPv4 addresses), `dualstack` (IPv4 and IPv6 addresses), and `dualstack-without-public-ipv4` (public IPv6 addresses and private IPv4 and IPv6 addresses).\n\nApplication Load Balancer authentication supports IPv4 addresses only when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer can't complete the authentication process, resulting in HTTP 500 errors.\n\n[Network Load Balancers and Gateway Load Balancers] The possible values are `ipv4` (IPv4 addresses) and `dualstack` (IPv4 and IPv6 addresses).", "LoadBalancerAttributes": "The load balancer attributes.", @@ -15962,6 +16250,7 @@ "AllocationId": "[Network Load Balancers] The allocation ID of the Elastic IP address for an internet-facing load balancer.", "IPv6Address": "[Network Load Balancers] The IPv6 address.", "PrivateIPv4Address": "[Network Load Balancers] The private IPv4 address for an internal load balancer.", + "SourceNatIpv6Prefix": "[Network Load Balancers with UDP listeners] The IPv6 prefix to use for source NAT. Specify an IPv6 prefix (/80 netmask) from the subnet CIDR block or `auto_assigned` to use an IPv6 prefix selected at random from the subnet CIDR block.", "SubnetId": "The ID of the subnet." }, "AWS::ElasticLoadBalancingV2::LoadBalancer Tag": { @@ -16714,15 +17003,26 @@ "Actions": "The actions for the experiment.", "Description": "The description for the experiment template.", "ExperimentOptions": "The experiment options for an experiment template.", + "ExperimentReportConfiguration": "Describes the report configuration for the experiment template.", "LogConfiguration": "The configuration for experiment logging.", "RoleArn": "The Amazon Resource Name (ARN) of an IAM role.", "StopConditions": "The stop conditions for the experiment.", "Tags": "The tags for the experiment template.", "Targets": "The targets for the experiment." }, + "AWS::FIS::ExperimentTemplate CloudWatchDashboard": { + "DashboardIdentifier": "The Amazon Resource Name (ARN) of the CloudWatch dashboard to include in the experiment report." 
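A minimal sketch of the new FIS experiment report configuration described in this hunk; the dashboard ARN, bucket name, and the ISO 8601 duration strings are illustrative assumptions, not values confirmed by this schema update.

```yaml
# Hypothetical report configuration for an AWS::FIS::ExperimentTemplate.
# The ARN, bucket name, and PT10M duration strings are assumptions for
# illustration; only the property names come from this hunk.
ExperimentReportConfiguration:
  DataSources:
    CloudWatchDashboards:
      - DashboardIdentifier: arn:aws:cloudwatch::111122223333:dashboard/example-dashboard
  Outputs:
    ExperimentReportS3Configuration:
      BucketName: example-fis-reports
      Prefix: reports/
  PreExperimentDuration: PT10M
  PostExperimentDuration: PT10M
```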
+  },
   "AWS::FIS::ExperimentTemplate CloudWatchLogsConfiguration": {
     "LogGroupArn": "The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group."
   },
+  "AWS::FIS::ExperimentTemplate DataSources": {
+    "CloudWatchDashboards": "The CloudWatch dashboards to include as data sources in the experiment report."
+  },
+  "AWS::FIS::ExperimentTemplate ExperimentReportS3Configuration": {
+    "BucketName": "The name of the S3 bucket where the experiment report will be stored.",
+    "Prefix": "The prefix of the S3 bucket where the experiment report will be stored."
+  },
   "AWS::FIS::ExperimentTemplate ExperimentTemplateAction": {
     "ActionId": "The ID of the action.",
     "Description": "A description for the action.",
@@ -16734,6 +17034,12 @@
     "AccountTargeting": "The account targeting setting for an experiment template.",
     "EmptyTargetResolutionMode": "The empty target resolution mode for an experiment template."
   },
+  "AWS::FIS::ExperimentTemplate ExperimentTemplateExperimentReportConfiguration": {
+    "DataSources": "The data sources for the experiment report.",
+    "Outputs": "The output destinations of the experiment report.",
+    "PostExperimentDuration": "The duration after the experiment end time for the data sources to include in the report.",
+    "PreExperimentDuration": "The duration before the experiment start time for the data sources to include in the report."
+  },
   "AWS::FIS::ExperimentTemplate ExperimentTemplateLogConfiguration": {
     "CloudWatchLogsConfiguration": "The configuration for experiment logging to CloudWatch Logs .",
     "LogSchemaVersion": "The schema version.",
@@ -16755,6 +17061,9 @@
     "Path": "The attribute path for the filter.",
     "Values": "The attribute values for the filter."
   },
+  "AWS::FIS::ExperimentTemplate Outputs": {
+    "ExperimentReportS3Configuration": "The S3 destination for the experiment report."
+  },
   "AWS::FIS::ExperimentTemplate S3Configuration": {
     "BucketName": "The name of the destination bucket.",
     "Prefix": "The bucket prefix."
   },
@@ -17372,31 +17681,90 @@
     "ObjectVersion": "A version of a stored file to retrieve, if the object versioning feature is turned on for the S3 bucket. Use this parameter to specify a specific version. If this parameter isn't set, Amazon GameLift retrieves the latest version of the file.",
     "RoleArn": "The ARN for an IAM role that allows Amazon GameLift to access the S3 bucket."
   },
+  "AWS::GameLift::ContainerFleet": {
+    "BillingType": "Indicates whether the fleet uses On-Demand or Spot instances. Learn more about when to use [On-Demand versus Spot Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot) . You can't update this fleet property.\n\nBy default, this property is set to `ON_DEMAND` .",
+    "DeploymentConfiguration": "Set of rules for processing a deployment for a container fleet update.",
+    "Description": "A meaningful description of the container fleet.",
+    "FleetRoleArn": "The unique identifier for an AWS Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. See [Set up an IAM service role](https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html) . 
This fleet property can't be changed.",
+    "GameServerContainerGroupDefinitionName": "The name of the fleet's game server container group definition, which describes how to deploy containers with your game server build and support software onto each fleet instance.",
+    "GameServerContainerGroupsPerInstance": "The number of times to replicate the game server container group on each fleet instance.",
+    "GameSessionCreationLimitPolicy": "A policy that limits the number of game sessions that each individual player can create on instances in this fleet. The limit applies for a specified span of time.",
+    "InstanceConnectionPortRange": "The set of port numbers to open on each instance in a container fleet. Connection ports are used by inbound traffic to connect with processes that are running in containers on the fleet.",
+    "InstanceInboundPermissions": "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet.",
+    "InstanceType": "The Amazon EC2 instance type to use for all instances in the fleet. Instance type determines the computing resources and processing power that's available to host your game servers. This includes CPU, memory, storage, and networking capacity. You can't update this fleet property.",
+    "Locations": "",
+    "LogConfiguration": "The method that is used to collect container logs for the fleet. Amazon GameLift saves all standard output for each container in logs, including game session logs.\n\n- `CLOUDWATCH` -- Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group.\n- `S3` -- Store logs in an Amazon S3 bucket that you define.\n- `NONE` -- Don't collect container logs.",
+    "MetricGroups": "The name of an AWS CloudWatch metric group to add this fleet to. Metric groups aggregate metrics for multiple fleets.",
+    "NewGameSessionProtectionPolicy": "Determines whether Amazon GameLift can shut down game sessions on the fleet that are actively running and hosting players. Amazon GameLift might prompt an instance shutdown when scaling down fleet capacity or when retiring unhealthy instances. You can also set game session protection for individual game sessions using [UpdateGameSession](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateGameSession.html) .\n\n- *NoProtection* -- Game sessions can be shut down during active gameplay.\n- *FullProtection* -- Game sessions in `ACTIVE` status can't be shut down.",
+    "PerInstanceContainerGroupDefinitionName": "The name of the fleet's per-instance container group definition.",
+    "ScalingPolicies": "",
+    "Tags": ""
+  },
+  "AWS::GameLift::ContainerFleet ConnectionPortRange": {
+    "FromPort": "Starting value for the port range.",
+    "ToPort": "Ending value for the port. Port numbers are end-inclusive. This value must be equal to or greater than `FromPort` ."
+  },
+  "AWS::GameLift::ContainerFleet DeploymentConfiguration": {
+    "ImpairmentStrategy": "Determines what actions to take if a deployment fails. If the fleet is multi-location, this strategy applies across all fleet locations. With a rollback strategy, updated fleet instances are rolled back to the last successful deployment. 
Alternatively, you can maintain a few impaired containers for the purpose of debugging, while all other tasks return to the last successful deployment.", + "MinimumHealthyPercentage": "Sets a minimum level of healthy tasks to maintain during deployment activity.", + "ProtectionStrategy": "Determines how fleet deployment activity affects active game sessions on the fleet. With protection, a deployment honors game session protection, and delays actions that would interrupt a protected active game session until the game session ends. Without protection, deployment activity can shut down all running tasks, including active game sessions, regardless of game session protection." + }, + "AWS::GameLift::ContainerFleet DeploymentDetails": { + "LatestDeploymentId": "A unique identifier for a fleet deployment." + }, + "AWS::GameLift::ContainerFleet GameSessionCreationLimitPolicy": { + "NewGameSessionsPerCreator": "A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.\n\nThe policy evaluates when a player tries to create a new game session. On receiving a `CreateGameSession` request, Amazon GameLift checks that the player (identified by `CreatorId` ) has created fewer than game session limit in the specified time period.", + "PolicyPeriodInMinutes": "The time span used in evaluating the resource creation limit policy." + }, + "AWS::GameLift::ContainerFleet IpPermission": { + "FromPort": "A starting value for a range of allowed port numbers.\n\nFor fleets using Linux builds, only ports `22` and `1026-60000` are valid.\n\nFor fleets using Windows builds, only ports `1026-60000` are valid.", + "IpRange": "A range of allowed IP addresses. This value must be expressed in CIDR notation. Example: \" `000.000.000.000/[subnet mask]` \" or optionally the shortened version \" `0.0.0.0/[subnet mask]` \".", + "Protocol": "The network communication protocol used by the fleet.", + "ToPort": "An ending value for a range of allowed port numbers. Port numbers are end-inclusive. This value must be equal to or greater than `FromPort` .\n\nFor fleets using Linux builds, only ports `22` and `1026-60000` are valid.\n\nFor fleets using Windows builds, only ports `1026-60000` are valid." + }, + "AWS::GameLift::ContainerFleet LocationCapacity": { + "DesiredEC2Instances": "", + "MaxSize": "", + "MinSize": "" + }, + "AWS::GameLift::ContainerFleet LocationConfiguration": { + "Location": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", + "LocationCapacity": "", + "StoppedActions": "" + }, + "AWS::GameLift::ContainerFleet LogConfiguration": { + "LogDestination": "The type of log collection to use for a fleet.\n\n- `CLOUDWATCH` -- (default value) Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group.\n- `S3` -- Store logs in an Amazon S3 bucket that you define.\n- `NONE` -- Don't collect container logs.", + "S3BucketName": "If log destination is `S3` , logs are sent to the specified Amazon S3 bucket name." 
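Pulling the new `AWS::GameLift::ContainerFleet` properties together, a minimal sketch might look like the following; the role ARN, container group definition name, port values, and bucket name are placeholders, and required properties beyond those shown are not verified here.

```yaml
# Hypothetical minimal AWS::GameLift::ContainerFleet, assembled from the
# property descriptions above. Role ARN, group definition name, port
# numbers, and bucket name are placeholders, not values from this update.
GameFleet:
  Type: AWS::GameLift::ContainerFleet
  Properties:
    FleetRoleArn: arn:aws:iam::111122223333:role/ExampleGameLiftFleetRole
    GameServerContainerGroupDefinitionName: example-game-server-group
    InstanceConnectionPortRange:
      FromPort: 1026
      ToPort: 60000
    InstanceInboundPermissions:
      - FromPort: 1026
        ToPort: 60000
        IpRange: 10.0.0.0/16   # CIDR notation, as the IpPermission docs require
        Protocol: TCP
    LogConfiguration:
      LogDestination: S3
      S3BucketName: example-fleet-logs
```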
+ }, + "AWS::GameLift::ContainerFleet ScalingPolicy": { + "ComparisonOperator": "Comparison operator to use when measuring a metric against the threshold value.", + "EvaluationPeriods": "Length of time (in minutes) the metric must be at or beyond the threshold before a scaling event is triggered.", + "MetricName": "Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see [Monitor Amazon GameLift with Amazon CloudWatch](https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html) .\n\n- *ActivatingGameSessions* -- Game sessions in the process of being created.\n- *ActiveGameSessions* -- Game sessions that are currently running.\n- *ActiveInstances* -- Fleet instances that are currently running at least one game session.\n- *AvailableGameSessions* -- Additional game sessions that fleet could host simultaneously, given current capacity.\n- *AvailablePlayerSessions* -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.\n- *CurrentPlayerSessions* -- Player slots in active game sessions that are being used by a player or are reserved for a player.\n- *IdleInstances* -- Active instances that are currently hosting zero game sessions.\n- *PercentAvailableGameSessions* -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.\n- *PercentIdleInstances* -- Percentage of the total number of active instances that are hosting zero game sessions.\n- *QueueDepth* -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.\n- *WaitTime* -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.", + "Name": "A descriptive label that is associated with a fleet's scaling policy. Policy names do not need to be unique.", + "PolicyType": "The type of scaling policy to create. For a target-based policy, set the parameter *MetricName* to 'PercentAvailableGameSessions' and specify a *TargetConfiguration* . For a rule-based policy set the following parameters: *MetricName* , *ComparisonOperator* , *Threshold* , *EvaluationPeriods* , *ScalingAdjustmentType* , and *ScalingAdjustment* .", + "ScalingAdjustment": "Amount of adjustment to make, based on the scaling adjustment type.", + "ScalingAdjustmentType": "The type of adjustment to make to a fleet's instance count.\n\n- *ChangeInCapacity* -- add (or subtract) the scaling adjustment value from the current instance count. Positive values scale up while negative values scale down.\n- *ExactCapacity* -- set the instance count to the scaling adjustment value.\n- *PercentChangeInCapacity* -- increase or reduce the current instance count by the scaling adjustment, read as a percentage. Positive values scale up while negative values scale down.", + "TargetConfiguration": "An object that contains settings for a target-based scaling policy.", + "Threshold": "Metric value used to trigger a scaling event." + }, + "AWS::GameLift::ContainerFleet Tag": { + "Key": "The key for a developer-defined key value pair for tagging an AWS resource.", + "Value": "The value for a developer-defined key value pair for tagging an AWS resource." 
+ }, + "AWS::GameLift::ContainerFleet TargetConfiguration": { + "TargetValue": "Desired value to use with a target-based scaling policy. The value must be relevant for whatever metric the scaling policy is using. For example, in a policy using the metric PercentAvailableGameSessions, the target value should be the preferred size of the fleet's buffer (the percent of capacity that should be idle and ready for new game sessions)." + }, "AWS::GameLift::ContainerGroupDefinition": { - "ContainerDefinitions": "The set of container definitions that are included in the container group.", + "ContainerGroupType": "The type of container group. Container group type determines how Amazon GameLift deploys the container group on each fleet instance.", + "GameServerContainerDefinition": "The definition for the game server container in this group. This property is used only when the container group type is `GAME_SERVER` . This container definition specifies a container image with the game server build.", "Name": "A descriptive identifier for the container group definition. The name value is unique in an AWS Region.", - "OperatingSystem": "The platform required for all containers in the container group definition.\n\n> Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the [Amazon Linux 2 FAQs](https://docs.aws.amazon.com/https://aws.amazon.com/amazon-linux-2/faqs/) . For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See [Migrate to Amazon GameLift server SDK version 5.](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-serversdk5-migration.html)", - "SchedulingStrategy": "The method for deploying the container group across fleet instances. A replica container group might have multiple copies on each fleet instance. A daemon container group maintains only one copy per fleet instance.", + "OperatingSystem": "The platform that all containers in the container group definition run on.\n\n> Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the [Amazon Linux 2 FAQs](https://docs.aws.amazon.com/https://aws.amazon.com/amazon-linux-2/faqs/) . For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See [Migrate to Amazon GameLift server SDK version 5.](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-serversdk5-migration.html)", "SourceVersionNumber": "", - "SupportContainerDefinitions": "", + "SupportContainerDefinitions": "The set of definitions for support containers in this group. A container group definition might have zero support container definitions. Support container can be used in any type of container group.", "Tags": "", - "TotalCpuLimit": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.", - "TotalMemoryLimit": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. 
All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group." - }, - "AWS::GameLift::ContainerGroupDefinition ContainerDefinition": { - "Command": "A command that's passed to the container on startup. Each argument for the command is an additional string in the array. See the [ContainerDefinition::command](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-command) parameter in the *Amazon Elastic Container Service API reference.*", - "ContainerName": "The container definition identifier. Container names are unique within a container group definition.", - "Cpu": "The number of CPU units that are reserved for the container. Note: 1 vCPU unit equals 1024 CPU units. If no resources are reserved, the container shares the total CPU limit for the container group.\n\n*Related data type:* `ContainerGroupDefinition$TotalCpuLimit`", - "DependsOn": "Indicates that the container relies on the status of other containers in the same container group during its startup and shutdown sequences. A container might have dependencies on multiple containers.", - "EntryPoint": "The entry point that's passed to the container on startup. If there are multiple arguments, each argument is an additional string in the array. See the [ContainerDefinition::entryPoint](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-entryPoint) parameter in the *Amazon Elastic Container Service API Reference* .", - "Environment": "A set of environment variables that's passed to the container on startup. See the [ContainerDefinition::environment](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-environment) parameter in the *Amazon Elastic Container Service API Reference* .", - "Essential": "Indicates whether the container is vital to the container group. If an essential container fails, the entire container group is restarted.", - "HealthCheck": "A configuration for a non-terminal health check. A container, which automatically restarts if it stops functioning, also restarts if it fails this health check. If an essential container in the daemon group fails a health check, the entire container group is restarted. The essential container in the replica group doesn't use this health check mechanism, because the Amazon GameLift Agent automatically handles the task.", - "ImageUri": "The URI to the image that $short; copied and deployed to a container fleet. For a more specific identifier, see `ResolvedImageDigest` .", - "MemoryLimits": "The amount of memory that Amazon GameLift makes available to the container. If memory limits aren't set for an individual container, the container shares the container group's total memory allocation.\n\n*Related data type:* `ContainerGroupDefinition$TotalMemoryLimit`", - "PortConfiguration": "Defines the ports that are available to assign to processes in the container. For example, a game server process requires a container port to allow game clients to connect to it. Container ports aren't directly accessed by inbound traffic. 
Amazon GameLift maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's `ConnectionPortRange` .", - "ResolvedImageDigest": "A unique and immutable identifier for the container image that is deployed to a container fleet. The digest is a SHA 256 hash of the container image manifest.", - "WorkingDirectory": "The directory in the container where commands are run. See the [ContainerDefinition::workingDirectory](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-workingDirectory) parameter in the *Amazon Elastic Container Service API Reference* ." + "TotalMemoryLimitMebibytes": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. All containers in the group share these resources.\n\nYou can set a limit for each container definition in the group. If individual containers have limits, this total value must be greater than any individual container's memory limit.", + "TotalVcpuLimit": "The amount of vCPU units on a fleet instance to allocate for the container group (1 vCPU is equal to 1024 CPU units). All containers in the group share these resources. You can set a limit for each container definition in the group. If individual containers have limits, this total value must be equal to or greater than the sum of the limits for each container in the group.", + "VersionDescription": "An optional description that was provided for a container group definition update. Each version can have a unique description." }, "AWS::GameLift::ContainerGroupDefinition ContainerDependency": { "Condition": "The condition that the dependency container must reach before the dependent container can start. Valid conditions include:\n\n- START - The dependency container must have started.\n- COMPLETE - The dependency container has run to completion (exits). Use this condition with nonessential containers, such as those that run a script and then exit. The dependency container can't be an essential container.\n- SUCCESS - The dependency container has run to completion and exited with a zero status. The dependency container can't be an essential container.\n- HEALTHY - The dependency container has passed its Docker health check. Use this condition with dependency containers that have health checks configured. This condition is confirmed at container group startup only.", @@ -17409,40 +17777,63 @@ "AWS::GameLift::ContainerGroupDefinition ContainerHealthCheck": { "Command": "A string array that specifies the command that the container runs to determine if it's healthy.", "Interval": "The time period (in seconds) between each health check.", - "Retries": "The number of times to retry a failed health check before the container is considered unhealthy. The first run of the command does not count as a retry.", + "Retries": "The number of times to retry a failed health check before flagging the container unhealthy. The first run of the command does not count as a retry.", "StartPeriod": "The optional grace period (in seconds) to give a container time to bootstrap before the first failed health check counts toward the number of retries.", - "Timeout": "The time period (in seconds) to wait for a health check to succeed before a failed health check is counted." + "Timeout": "The time period (in seconds) to wait for a health check to succeed before counting a failed health check." 
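Taken together, the health check fields read naturally as a schedule. A hypothetical support container check might look like the following; the command itself is a placeholder, and only the field names come from this schema:

    "HealthCheck": {
      "Command": [ "CMD-SHELL", "curl -f http://localhost:8080/health || exit 1" ],
      "StartPeriod": 60,
      "Interval": 30,
      "Timeout": 5,
      "Retries": 3
    }

Read as: wait 60 seconds before failures start counting, run the command every 30 seconds, allow 5 seconds for it to succeed, and flag the container unhealthy once the initial failure has been retried 3 times without success.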
+ }, + "AWS::GameLift::ContainerGroupDefinition ContainerMountPoint": { + "AccessLevel": "The type of access for the container.", + "ContainerPath": "The mount path on the container. If this property isn't set, the instance path is used.", + "InstancePath": "The path to the source file or directory." }, "AWS::GameLift::ContainerGroupDefinition ContainerPortRange": { "FromPort": "A starting value for the range of allowed port numbers.", "Protocol": "The network protocol that these ports support.", "ToPort": "An ending value for the range of allowed port numbers. Port numbers are end-inclusive. This value must be equal to or greater than `FromPort` ." }, - "AWS::GameLift::ContainerGroupDefinition MemoryLimits": { - "HardLimit": "", - "SoftLimit": "" + "AWS::GameLift::ContainerGroupDefinition GameServerContainerDefinition": { + "ContainerName": "The container definition identifier. Container names are unique within a container group definition.", + "DependsOn": "Indicates that the container relies on the status of other containers in the same container group during startup and shutdown sequences. A container might have dependencies on multiple containers.", + "EnvironmentOverride": "A set of environment variables that's passed to the container on startup. See the [ContainerDefinition::environment](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-environment) parameter in the *Amazon Elastic Container Service API Reference* .", + "ImageUri": "The URI to the image that Amazon GameLift uses when deploying this container to a container fleet. For a more specific identifier, see `ResolvedImageDigest` .", + "MountPoints": "A mount point that binds a path inside the container to a file or directory on the host system and lets it access the file or directory.", + "PortConfiguration": "The set of ports that are available to bind to processes in the container. For example, a game server process requires a container port to allow game clients to connect to it. Container ports aren't directly accessed by inbound traffic. Amazon GameLift maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's `ConnectionPortRange` .", + "ResolvedImageDigest": "A unique and immutable identifier for the container image. The digest is a SHA 256 hash of the container image manifest.", + "ServerSdkVersion": "The Amazon GameLift server SDK version that the game server is integrated with. Only game servers using 5.2.0 or higher are compatible with container fleets." }, "AWS::GameLift::ContainerGroupDefinition PortConfiguration": { "ContainerPortRanges": "" }, + "AWS::GameLift::ContainerGroupDefinition SupportContainerDefinition": { + "ContainerName": "The container definition identifier. Container names are unique within a container group definition.", + "DependsOn": "Indicates that the container relies on the status of other containers in the same container group during its startup and shutdown sequences. A container might have dependencies on multiple containers.", + "EnvironmentOverride": "A set of environment variables that's passed to the container on startup. See the [ContainerDefinition::environment](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-environment) parameter in the *Amazon Elastic Container Service API Reference* .", + "Essential": "Indicates whether the container is vital to the container group. 
If an essential container fails, the entire container group restarts.", + "HealthCheck": "A configuration for a non-terminal health check. A support container automatically restarts if it stops functioning or if it fails this health check.", + "ImageUri": "The URI to the image that Amazon GameLift deploys to a container fleet. For a more specific identifier, see `ResolvedImageDigest` .", + "MemoryHardLimitMebibytes": "The amount of memory that Amazon GameLift makes available to the container. If memory limits aren't set for an individual container, the container shares the container group's total memory allocation.\n\n*Related data type:* [ContainerGroupDefinition TotalMemoryLimitMebibytes](https://docs.aws.amazon.com/gamelift/latest/apireference/API_ContainerGroupDefinition.html)", + "MountPoints": "A mount point that binds a path inside the container to a file or directory on the host system and lets it access the file or directory.", + "PortConfiguration": "A set of ports that allow access to the container from external users. Processes running in the container can bind to one of these ports. Container ports aren't directly accessed by inbound traffic. Amazon GameLift maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's `ConnectionPortRange` .", + "ResolvedImageDigest": "A unique and immutable identifier for the container image. The digest is a SHA 256 hash of the container image manifest.", + "Vcpu": "The number of vCPU units that are reserved for the container. If no resources are reserved, the container shares the total vCPU limit for the container group.\n\n*Related data type:* [ContainerGroupDefinition TotalVcpuLimit](https://docs.aws.amazon.com/gamelift/latest/apireference/API_ContainerGroupDefinition.html)" + }, "AWS::GameLift::ContainerGroupDefinition Tag": { "Key": "The key for a developer-defined key value pair for tagging an AWS resource.", "Value": "The value for a developer-defined key value pair for tagging an AWS resource." }, "AWS::GameLift::Fleet": { "AnywhereConfiguration": "Amazon GameLift Anywhere configuration options.", - "ApplyCapacity": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "ApplyCapacity": "Current resource capacity settings for managed EC2 fleets and managed container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "BuildId": "A unique identifier for a build to be deployed on the new fleet. If you are deploying the fleet with a custom game build, you must specify this property. 
The build must have been successfully uploaded to Amazon GameLift and be in a `READY` status. This fleet setting cannot be changed once the fleet is created.", "CertificateConfiguration": "Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon GameLift uses the certificates to encrypt traffic between game clients and the game servers running on Amazon GameLift. By default, the `CertificateConfiguration` is `DISABLED` . You can't change this property after you create the fleet.\n\nAWS Certificate Manager (ACM) certificates expire after 13 months. Certificate expiration can cause fleets to fail, preventing players from connecting to instances in the fleet. We recommend that you replace fleets before 13 months; consider using fleet aliases for a smooth transition.\n\n> ACM isn't available in all AWS regions. A fleet creation request with certificate generation enabled in an unsupported Region fails with a 4xx error. For more information about the supported Regions, see [Supported Regions](https://docs.aws.amazon.com/acm/latest/userguide/acm-regions.html) in the *AWS Certificate Manager User Guide* .", - "ComputeType": "The type of compute resource used to host your game servers.\n\n- `EC2` \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.\n- `CONTAINER` \u2013 Container images with your game server build and supporting software are deployed to Amazon EC2 instances for cloud hosting. With this compute type, you must specify the `ContainerGroupsConfiguration` parameter.\n- `ANYWHERE` \u2013 Game servers or container images with your game server and supporting software are deployed to compute resources that are provided and managed by you. With this compute type, you can also set the `AnywhereConfiguration` parameter.", - "ContainerGroupsConfiguration": "*This data type is currently not available. It is under improvement as we respond to customer feedback from the Containers public preview.*\n\nConfiguration details for a set of container groups, for use when creating a fleet with compute type `CONTAINER` .\n\n*Used with:* `CreateFleet`", + "ComputeType": "The type of compute resource used to host your game servers.\n\n- `EC2` \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.\n- `ANYWHERE` \u2013 Game servers and supporting software are deployed to compute resources that you provide and manage. With this compute type, you can also set the `AnywhereConfiguration` parameter.", "Description": "A description for the fleet.", "DesiredEC2Instances": "The number of EC2 instances that you want this fleet to host. When creating a new fleet, GameLift automatically sets this value to \"1\" and initiates a single instance. Once the fleet is active, update this value to trigger GameLift to add or remove instances from the fleet.", - "EC2InboundPermissions": "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for EC2 and container fleets. You can leave this parameter empty when creating the fleet, but you must call `UpdateFleetPortSettings` to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. 
For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.\n\nTo manage inbound access for a container fleet, set this parameter to the same port numbers that you set for the fleet's connection port range. During the life of the fleet, update this parameter to control which connection ports are open to inbound traffic.", - "EC2InstanceType": "The Amazon GameLift-supported Amazon EC2 instance type to use with EC2 and container fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See [Amazon Elastic Compute Cloud Instance Types](https://docs.aws.amazon.com/ec2/instance-types/) for detailed descriptions of Amazon EC2 instance types.", + "EC2InboundPermissions": "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call [](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings) to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.", + "EC2InstanceType": "The Amazon GameLift-supported Amazon EC2 instance type to use with managed EC2 fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See [Amazon Elastic Compute Cloud Instance Types](https://docs.aws.amazon.com/ec2/instance-types/) for detailed descriptions of Amazon EC2 instance types.", "FleetType": "Indicates whether to use On-Demand or Spot instances for this fleet. By default, this property is set to `ON_DEMAND` . Learn more about when to use [On-Demand versus Spot Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot) . This fleet property can't be changed after the fleet is created.", - "InstanceRoleARN": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", - "InstanceRoleCredentialsProvider": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . 
This attribute is used with fleets where `ComputeType` is \"EC2\".", + "InstanceRoleARN": "A unique identifier for an IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the [IAM dashboard](https://docs.aws.amazon.com/iam/) in the AWS Management Console . Learn more about using on-box credentials for your game servers at [Access external resources from a game server](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is `EC2` .", + "InstanceRoleCredentialsProvider": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is `EC2` .", "Locations": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "MaxSize": "The maximum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 1.", "MetricGroups": "The name of an AWS CloudWatch metric group to add this fleet to. A metric group is used to aggregate the metrics for multiple fleets. You can specify an existing metric group name or set a new name to create a new metric group. A fleet can be included in only one metric group at a time.", @@ -17462,19 +17853,6 @@ "AWS::GameLift::Fleet CertificateConfiguration": { "CertificateType": "Indicates whether a TLS/SSL certificate is generated for a fleet.\n\nValid values include:\n\n- *GENERATED* - Generate a TLS/SSL certificate for this fleet.\n- *DISABLED* - (default) Do not generate a TLS/SSL certificate for this fleet." }, - "AWS::GameLift::Fleet ConnectionPortRange": { - "FromPort": "Starting value for the port range.", - "ToPort": "Ending value for the port. Port numbers are end-inclusive. This value must be equal to or greater than `FromPort` ." - }, - "AWS::GameLift::Fleet ContainerGroupsConfiguration": { - "ConnectionPortRange": "A set of ports to allow inbound traffic, including game clients, to connect to processes running in the container fleet.\n\nConnection ports are dynamically mapped to container ports, which are assigned to individual processes running in a container. The connection port range must have enough ports to map to all container ports across a fleet instance. 
To calculate the minimum connection ports needed, use the following formula:\n\n*[Total number of container ports as defined for containers in the replica container group] * [Desired or calculated number of replica container groups per instance] + [Total number of container ports as defined for containers in the daemon container group]*\n\nAs a best practice, double the minimum number of connection ports.\n\n> Use the fleet's `EC2InboundPermissions` property to control external access to connection ports. Set this property to the connection port numbers that you want to open access to. See `IpPermission` for more details.", - "ContainerGroupDefinitionNames": "The list of container group definition names to deploy to a new container fleet.", - "ContainerGroupsPerInstance": "" - }, - "AWS::GameLift::Fleet ContainerGroupsPerInstance": { - "DesiredReplicaContainerGroupsPerInstance": "The desired number of replica container groups to place on each fleet instance.", - "MaxReplicaContainerGroupsPerInstance": "The maximum possible number of replica container groups that each fleet instance can have." - }, "AWS::GameLift::Fleet IpPermission": { "FromPort": "A starting value for a range of allowed port numbers.\n\nFor fleets using Linux builds, only ports `22` and `1026-60000` are valid.\n\nFor fleets using Windows builds, only ports `1026-60000` are valid.", "IpRange": "A range of allowed IP addresses. This value must be expressed in CIDR notation. Example: \" `000.000.000.000/[subnet mask]` \" or optionally the shortened version \" `0.0.0.0/[subnet mask]` \".", @@ -17488,7 +17866,7 @@ }, "AWS::GameLift::Fleet LocationConfiguration": { "Location": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", - "LocationCapacity": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)" + "LocationCapacity": "Current resource capacity settings for managed EC2 fleets and managed container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)" }, "AWS::GameLift::Fleet ResourceCreationLimitPolicy": { "NewGameSessionsPerCreator": "A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.\n\nThe policy is evaluated when a player tries to create a new game session. 
On receiving a `CreateGameSession` request, Amazon GameLift checks that the player (identified by `CreatorId` ) has created fewer than the game session limit in the specified time period.", "PolicyPeriodInMinutes": "The time span used in evaluating the resource creation limit policy." }, "AWS::GameLift::Fleet RuntimeConfiguration": { "GameSessionActivationTimeoutSeconds": "The maximum amount of time (in seconds) allowed to launch a new game session and have it report ready to host players. During this time, the game session is in status `ACTIVATING` . If the game session does not become active before the timeout, it is ended and the game session status is changed to `TERMINATED` .", - "MaxConcurrentGameSessionActivations": "The number of game sessions in status `ACTIVATING` to allow on an instance. This setting limits the instance resources that can be used for new game activations at any one time.", + "MaxConcurrentGameSessionActivations": "The number of game sessions in status `ACTIVATING` to allow on an instance or compute. This setting limits the instance resources that can be used for new game activations at any one time.", "ServerProcesses": "A collection of server process configurations that identify what server processes to run on fleet computes." }, "AWS::GameLift::Fleet ScalingPolicy": { @@ -17514,7 +17892,7 @@ "UpdateStatus": "The current status of the fleet's scaling policies in a requested fleet location. The status `PENDING_UPDATE` indicates that an update was requested for the fleet but has not yet been completed for the location." }, "AWS::GameLift::Fleet ServerProcess": { - "ConcurrentExecutions": "The number of server processes using this configuration that run concurrently on each instance.", + "ConcurrentExecutions": "The number of server processes using this configuration that run concurrently on each instance or compute.", "LaunchPath": "The location of a game build executable or Realtime script. Game builds and Realtime scripts are installed on instances at the root:\n\n- Windows (custom game builds only): `C:\game` . Example: \" `C:\game\MyGame\server.exe` \"\n- Linux: `/local/game` . Examples: \" `/local/game/MyGame/server.exe` \" or \" `/local/game/MyRealtimeScript.js` \"\n\n> Amazon GameLift doesn't support the use of setup scripts that launch the game executable. For custom game builds, this parameter must indicate the executable that calls the server SDK operations `initSDK()` and `ProcessReady()` .", "Parameters": "An optional list of parameters to pass to the server executable or Realtime script on launch.\n\nLength Constraints: Minimum length of 1. Maximum length of 1024.\n\nPattern: [A-Za-z0-9_:.+\/\\\- =@{},?'\[\]\"]+" }, @@ -17561,10 +17939,10 @@ "FilterConfiguration": "A list of locations where a queue is allowed to place new game sessions. Locations are specified in the form of AWS Region codes, such as `us-west-2` . If this parameter is not set, game sessions can be placed in any queue location.", "Name": "A descriptive label that is associated with a game session queue. Queue names must be unique within each Region.", "NotificationTarget": "An SNS topic ARN that is set up to receive game session placement notifications. See [Setting up notifications for game session placement](https://docs.aws.amazon.com/gamelift/latest/developerguide/queue-notification.html) .", - "PlayerLatencyPolicies": "A set of policies that act as a sliding cap on player latency. FleetIQ works to deliver low latency for most players in a game session. 
These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value.", + "PlayerLatencyPolicies": "A set of policies that enforce a sliding cap on player latency when processing game session placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value.", "PriorityConfiguration": "Custom settings to use when prioritizing destinations and locations for game session placements. This configuration replaces the FleetIQ default prioritization process. Priority types that are not explicitly named will be automatically applied at the end of the prioritization process.", "Tags": "A list of labels to assign to the new game session queue resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* . Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.", - "TimeoutInSeconds": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a `TIMED_OUT` status. By default, this property is set to `600` ." + "TimeoutInSeconds": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a `TIMED_OUT` status." }, "AWS::GameLift::GameSessionQueue FilterConfiguration": { "AllowedLocations": "A list of locations to allow game session placement in, in the form of AWS Region codes such as `us-west-2` ." @@ -17595,7 +17973,7 @@ "AWS::GameLift::MatchmakingConfiguration": { "AcceptanceRequired": "A flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to `TRUE` . With this option enabled, matchmaking tickets use the status `REQUIRES_ACCEPTANCE` to indicate when a completed potential match is waiting for player acceptance.", "AcceptanceTimeoutSeconds": "The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required.", - "AdditionalPlayerCount": "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 10-person team, and the additional player count is set to 2, 10 players will be selected for the match and 2 more player slots will be open for future players. This parameter is not used if `FlexMatchMode` is set to `STANDALONE` .", + "AdditionalPlayerCount": "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. 
This parameter is not used if `FlexMatchMode` is set to `STANDALONE` .", "BackfillMode": "The method used to backfill game sessions that are created with this matchmaking configuration. Specify `MANUAL` when your game manages backfill requests manually or does not use the match backfill feature. Specify `AUTOMATIC` to have GameLift create a `StartMatchBackfill` request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in [Backfill Existing Games with FlexMatch](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html) . Automatic backfill is not available when `FlexMatchMode` is set to `STANDALONE` .", "CreationTime": "A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example `\"1469498468.057\"` ).", "CustomEventData": "Information to add to all events related to the matchmaking configuration.", @@ -20367,11 +20745,20 @@ "ThingTypeName": "The name of the thing type.", "ThingTypeProperties": "The thing type properties for the thing type to create. It contains information about the new thing type including a description, and a list of searchable thing attribute names. `ThingTypeProperties` can't be updated after the initial creation of the `ThingType` ." }, + "AWS::IoT::ThingType Mqtt5Configuration": { + "PropagatingAttributes": "" + }, + "AWS::IoT::ThingType PropagatingAttribute": { + "ConnectionAttribute": "", + "ThingAttribute": "", + "UserPropertyKey": "" + }, "AWS::IoT::ThingType Tag": { "Key": "The tag's key.", "Value": "The tag's value." }, "AWS::IoT::ThingType ThingTypeProperties": { + "Mqtt5Configuration": "", "SearchableAttributes": "A list of searchable thing attribute names.", "ThingTypeDescription": "The description of the thing type." }, @@ -21199,6 +21586,7 @@ "Compression": "(Optional) Whether to compress signals before transmitting data to AWS IoT FleetWise . If you don't want to compress the signals, use `OFF` . If it's not specified, `SNAPPY` is used.\n\nDefault: `SNAPPY`", "DataDestinationConfigs": "(Optional) The destination where the campaign sends data. You can choose to send data to be stored in Amazon S3 or Amazon Timestream .\n\nAmazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, centralized data storage, data processing pipelines, and analytics. AWS IoT FleetWise supports at-least-once file delivery to S3. Your vehicle data is stored on multiple AWS IoT FleetWise servers for redundancy and high availability.\n\nYou can use Amazon Timestream to access and analyze time series data, and Timestream to query vehicle data so that you can identify trends and patterns.", "DataExtraDimensions": "(Optional) A list of vehicle attributes to associate with a campaign.\n\nEnrich the data with specified vehicle attributes. For example, add `make` and `model` to the campaign, and AWS IoT FleetWise will associate the data with those attributes as dimensions in Amazon Timestream . You can then query the data against `make` and `model` .\n\nDefault: An empty array", + "DataPartitions": "", "Description": "(Optional) The description of the campaign.", "DiagnosticsMode": "(Optional) Option for a vehicle to send diagnostic trouble codes to AWS IoT FleetWise . If you want to send diagnostic trouble codes, use `SEND_ACTIVE_DTCS` . 
If it's not specified, `OFF` is used.\n\nDefault: `OFF`", "ExpiryTime": "(Optional) The time the campaign expires, in seconds since epoch (January 1, 1970 at midnight UTC time). Vehicle data isn't collected after the campaign expires.\n\nDefault: 253402214400 (December 31, 9999, 00:00:00 UTC)", @@ -21232,6 +21620,20 @@ "S3Config": "(Optional) The Amazon S3 bucket where the AWS IoT FleetWise campaign sends data.", "TimestreamConfig": "(Optional) The Amazon Timestream table where the campaign sends data." }, + "AWS::IoTFleetWise::Campaign DataPartition": { + "Id": "", + "StorageOptions": "", + "UploadOptions": "" + }, + "AWS::IoTFleetWise::Campaign DataPartitionStorageOptions": { + "MaximumSize": "", + "MinimumTimeToLive": "", + "StorageLocation": "" + }, + "AWS::IoTFleetWise::Campaign DataPartitionUploadOptions": { + "ConditionLanguageVersion": "", + "Expression": "" + }, "AWS::IoTFleetWise::Campaign MqttTopicConfig": { "ExecutionRoleArn": "", "MqttTopicArn": "" @@ -21253,10 +21655,19 @@ "SignalFetchConfig": "" }, "AWS::IoTFleetWise::Campaign SignalInformation": { + "DataPartitionId": "", "MaxSampleCount": "(Optional) The maximum number of samples to collect.", "MinimumSamplingIntervalMs": "(Optional) The minimum duration of time (in milliseconds) between two triggering events to collect data.\n\n> If a signal changes often, you might want to collect data at a slower rate.", "Name": "The name of the signal." }, + "AWS::IoTFleetWise::Campaign StorageMaximumSize": { + "Unit": "", + "Value": "" + }, + "AWS::IoTFleetWise::Campaign StorageMinimumTimeToLive": { + "Unit": "", + "Value": "" + }, "AWS::IoTFleetWise::Campaign Tag": { "Key": "The tag's key.", "Value": "The tag's value." @@ -21581,7 +21992,7 @@ "PropertyPath": "The path of the property. Each step of the path is the name of the step. See the following example:\n\n`PropertyPath: Name: AssetModelName Name: Composite1 Name: NestedComposite`" }, "AWS::IoTSiteWise::Dashboard": { - "DashboardDefinition": "The dashboard definition specified in a JSON literal. For detailed information, see [Creating dashboards (CLI)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-using-aws-cli.html) in the *AWS IoT SiteWise User Guide* .", + "DashboardDefinition": "The dashboard definition specified in a JSON literal.\n\n- AWS IoT SiteWise Monitor (Classic) see [Create dashboards ( AWS CLI )](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-using-aws-cli.html)\n- AWS IoT SiteWise Monitor (AI-aware) see [Create dashboards ( AWS CLI )](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-ai-dashboard-cli.html)\n\nin the *AWS IoT SiteWise User Guide*", "DashboardDescription": "A description for the dashboard.", "DashboardName": "A friendly name for the dashboard.", "ProjectId": "The ID of the project in which to create the dashboard.", @@ -23056,6 +23467,7 @@ "AWS::KinesisFirehose::DeliveryStream": { "AmazonOpenSearchServerlessDestinationConfiguration": "Describes the configuration of a destination in the Serverless offering for Amazon OpenSearch Service.", "AmazonopensearchserviceDestinationConfiguration": "The destination in Amazon OpenSearch Service. 
You can specify only one destination.", + "DatabaseSourceConfiguration": "The top level object for configuring streams with database as a source.\n\nAmazon Data Firehose is in preview release and is subject to change.", "DeliveryStreamEncryptionConfigurationInput": "Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE).", "DeliveryStreamName": "The name of the Firehose stream.", "DeliveryStreamType": "The Firehose stream type. This can be one of the following values:\n\n- `DirectPut` : Provider applications access the Firehose stream directly.\n- `KinesisStreamAsSource` : The Firehose stream uses a Kinesis data stream as a source.", @@ -23069,7 +23481,7 @@ "S3DestinationConfiguration": "The `S3DestinationConfiguration` property type specifies an Amazon Simple Storage Service (Amazon S3) destination to which Amazon Kinesis Data Firehose (Kinesis Data Firehose) delivers data.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "SnowflakeDestinationConfiguration": "Configure Snowflake destination", "SplunkDestinationConfiguration": "The configuration of a destination in Splunk for the delivery stream.", - "Tags": "A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a Firehose stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose Firehose streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)" + "Tags": "A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. 
For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a Firehose stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)" }, "AWS::KinesisFirehose::DeliveryStream AmazonOpenSearchServerlessBufferingHints": { "IntervalInSeconds": "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).", @@ -23140,6 +23552,39 @@ "OutputFormatConfiguration": "Specifies the serializer that you want Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if `Enabled` is set to true.", "SchemaConfiguration": "Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if `Enabled` is set to true." }, + "AWS::KinesisFirehose::DeliveryStream DatabaseColumns": { + "Exclude": "", + "Include": "" + }, + "AWS::KinesisFirehose::DeliveryStream DatabaseSourceAuthenticationConfiguration": { + "SecretsManagerConfiguration": "" + }, + "AWS::KinesisFirehose::DeliveryStream DatabaseSourceConfiguration": { + "Columns": "The list of column patterns in source database endpoint for Firehose to read from.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "DatabaseSourceAuthenticationConfiguration": "The structure to configure the authentication methods for Firehose to connect to source database endpoint.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "DatabaseSourceVPCConfiguration": "The details of the VPC Endpoint Service which Firehose uses to create a PrivateLink to the database.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "Databases": "The list of database patterns in source database endpoint for Firehose to read from.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "Digest": "", + "Endpoint": "The endpoint of the database server.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "Port": "The port of the database. 
This can be one of the following values.\n\n- 3306 for MySQL database type\n- 5432 for PostgreSQL database type\n\nAmazon Data Firehose is in preview release and is subject to change.", + "PublicCertificate": "", + "SSLMode": "The mode to enable or disable SSL when Firehose connects to the database endpoint.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "SnapshotWatermarkTable": "The fully qualified name of the table in source database endpoint that Firehose uses to track snapshot progress.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "SurrogateKeys": "The optional list of table and column names used as unique key columns when taking a snapshot if the tables don\u2019t have primary keys configured.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "Tables": "The list of table patterns in source database endpoint for Firehose to read from.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "Type": "The type of database engine. This can be one of the following values.\n\n- MySQL\n- PostgreSQL\n\nAmazon Data Firehose is in preview release and is subject to change." + }, + "AWS::KinesisFirehose::DeliveryStream DatabaseSourceVPCConfiguration": { + "VpcEndpointServiceName": "The VPC endpoint service name which Firehose uses to create a PrivateLink to the database. The endpoint service must have the Firehose service principal `firehose.amazonaws.com` as an allowed principal on the VPC endpoint service. The VPC endpoint service name is a string that looks like `com.amazonaws.vpce..` .\n\nAmazon Data Firehose is in preview release and is subject to change." + }, + "AWS::KinesisFirehose::DeliveryStream DatabaseTables": { + "Exclude": "", + "Include": "" + }, + "AWS::KinesisFirehose::DeliveryStream Databases": { + "Exclude": "", + "Include": "" + }, "AWS::KinesisFirehose::DeliveryStream DeliveryStreamEncryptionConfigurationInput": { "KeyARN": "If you set `KeyType` to `CUSTOMER_MANAGED_CMK` , you must specify the Amazon Resource Name (ARN) of the CMK. If you set `KeyType` to `AWS_OWNED_CMK` , Firehose uses a service-account CMK.", "KeyType": "Indicates the type of customer master key (CMK) to use for encryption. The default setting is `AWS_OWNED_CMK` . For more information about CMKs, see [Customer Master Keys (CMKs)](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys) .\n\nYou can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams.\n\n> To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see [About Symmetric and Asymmetric CMKs](https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html) in the AWS Key Management Service developer guide." @@ -23345,7 +23790,7 @@ "AWS::KinesisFirehose::DeliveryStream Serializer": { "OrcSerDe": "A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see [Apache ORC](https://docs.aws.amazon.com/https://orc.apache.org/docs/) .", - "ParquetSerDe": "A serializer to use for converting data to the Parquet format before storing it in Amazon S3. 
For more information, see [Apache Parquet](https://docs.aws.amazon.com/https://parquet.apache.org/docs/contribution-guidelines/) ."
 },
 "AWS::KinesisFirehose::DeliveryStream SnowflakeBufferingHints": {
 "IntervalInSeconds": "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 0.",
@@ -23355,11 +23800,11 @@
 "AccountUrl": "URL for accessing your Snowflake account. This URL must include your [account identifier](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-account-identifier) . Note that the protocol (https://) and port number are optional.",
 "BufferingHints": "Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.",
 "CloudWatchLoggingOptions": "",
- "ContentColumnName": "The name of the record content column",
+ "ContentColumnName": "The name of the record content column.",
 "DataLoadingOption": "Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.",
 "Database": "All data in Snowflake is maintained in databases.",
 "KeyPassphrase": "Passphrase to decrypt the private key when the key is encrypted. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .",
- "MetaDataColumnName": "The name of the record metadata column",
+ "MetaDataColumnName": "Specify a column name in the table where the metadata information is to be loaded. When you enable this field, you will see the following column in the Snowflake table, which differs based on the source type.\n\nFor Direct PUT as source\n\n`{ \"firehoseDeliveryStreamName\" : \"streamname\", \"IngestionTime\" : \"timestamp\" }`\n\nFor Kinesis Data Stream as source\n\n`{ \"kinesisStreamName\" : \"streamname\", \"kinesisShardId\" : \"Id\", \"kinesisPartitionKey\" : \"key\", \"kinesisSequenceNumber\" : \"1234\", \"subsequenceNumber\" : \"2334\", \"IngestionTime\" : \"timestamp\" }`",
 "PrivateKey": "The private key used to encrypt your Snowflake client. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .",
 "ProcessingConfiguration": "Specifies configuration for Snowflake.",
 "RetryOptions": "The time period where Firehose will retry sending data to the chosen HTTP endpoint.",
@@ -23650,7 +24095,7 @@
 "Value": "The value for this tag." 
},
 "AWS::Lambda::EventInvokeConfig": {
- "DestinationConfig": "A destination for events after they have been sent to a function for processing.\n\n**Destinations** - *Function* - The Amazon Resource Name (ARN) of a Lambda function.\n- *Queue* - The ARN of a standard SQS queue.\n- *Topic* - The ARN of a standard SNS topic.\n- *Event Bus* - The ARN of an Amazon EventBridge event bus.",
+ "DestinationConfig": "A destination for events after they have been sent to a function for processing.\n\n**Destinations**\n\n- *Function* - The Amazon Resource Name (ARN) of a Lambda function.\n- *Queue* - The ARN of a standard SQS queue.\n- *Bucket* - The ARN of an Amazon S3 bucket.\n- *Topic* - The ARN of a standard SNS topic.\n- *Event Bus* - The ARN of an Amazon EventBridge event bus.\n\n> S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.",
 "FunctionName": "The name of the Lambda function.\n\n*Minimum* : `1`\n\n*Maximum* : `64`\n\n*Pattern* : `([a-zA-Z0-9-_]+)`",
 "MaximumEventAgeInSeconds": "The maximum age of a request that Lambda sends to a function for processing.",
 "MaximumRetryAttempts": "The maximum number of times to retry when the function returns an error.",
@@ -23661,7 +24106,7 @@
 "OnSuccess": "The destination configuration for successful invocations."
 },
 "AWS::Lambda::EventInvokeConfig OnFailure": {
- "Destination": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination."
+ "Destination": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of unsuccessful [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) , [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) , [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination."
 },
 "AWS::Lambda::EventInvokeConfig OnSuccess": {
 "Destination": "The Amazon Resource Name (ARN) of the destination resource." 
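A minimal CloudFormation (YAML) sketch of the new S3 on-failure destination described in the AWS::Lambda::EventInvokeConfig entries above. This is an illustrative sketch, not part of the schema diff: the function name, qualifier, retry count, and bucket ARN are assumed placeholders.

    Resources:
      AsyncInvokeConfig:
        Type: AWS::Lambda::EventInvokeConfig
        Properties:
          FunctionName: my-function        # assumed function name
          Qualifier: "$LATEST"
          MaximumRetryAttempts: 1          # retry once before routing the event to the destination
          DestinationConfig:
            OnFailure:
              # S3 buckets are supported only for on-failure destinations;
              # successful invocations need a different destination type.
              Destination: arn:aws:s3:::my-failed-invocations-bucket   # assumed bucket ARN

The bucket destination uses the same generic `OnFailure.Destination` ARN field as the SNS, SQS, Lambda, and EventBridge destinations; the destination service is determined by the ARN's service namespace.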
@@ -23681,7 +24126,9 @@
 "MaximumBatchingWindowInSeconds": "The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.\n\n*Default ( Kinesis , DynamoDB , Amazon SQS event sources)* : 0\n\n*Default ( Amazon MSK , Kafka, Amazon MQ , Amazon DocumentDB event sources)* : 500 ms\n\n*Related setting:* For Amazon SQS event sources, when you set `BatchSize` to a value greater than 10, you must set `MaximumBatchingWindowInSeconds` to at least 1.",
 "MaximumRecordAgeInSeconds": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1,\nwhich sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.\n\n> The minimum valid value for maximum record age is 60s. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed.",
 "MaximumRetryAttempts": "(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1,\nwhich sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.",
+ "MetricsConfig": "",
+ "ProvisionedPollerConfig": "",
 "Queues": "(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.",
 "ScalingConfig": "(Amazon SQS only) The scaling configuration for the event source. For more information, see [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency) .",
 "SelfManagedEventSource": "The self-managed Apache Kafka cluster for your event source.",
@@ -23713,8 +24160,15 @@
 "AWS::Lambda::EventSourceMapping FilterCriteria": {
 "Filters": "A list of filters."
 },
+ "AWS::Lambda::EventSourceMapping MetricsConfig": {
+ "Metrics": ""
+ },
 "AWS::Lambda::EventSourceMapping OnFailure": {
- "Destination": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination." 
+ "Destination": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of unsuccessful [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) , [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) , [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination." + }, + "AWS::Lambda::EventSourceMapping ProvisionedPollerConfig": { + "MaximumPollers": "", + "MinimumPollers": "" }, "AWS::Lambda::EventSourceMapping ScalingConfig": { "MaximumConcurrency": "Limits the number of concurrent instances that the Amazon SQS event source can invoke." @@ -23745,7 +24199,7 @@ "FunctionName": "The name of the Lambda function, up to 64 characters in length. If you don't specify a name, AWS CloudFormation generates one.\n\nIf you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "Handler": "The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see [Lambda programming model](https://docs.aws.amazon.com/lambda/latest/dg/foundation-progmodel.html) .", "ImageConfig": "Configuration values that override the container image Dockerfile settings. For more information, see [Container image settings](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html#images-parms) .", - "KmsKeyArn": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) . When [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry ( Amazon ECR ). If you don't provide a customer managed key, Lambda uses a default service key.", + "KmsKeyArn": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt the following resources:\n\n- The function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) .\n- The function's [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) snapshots.\n- When used with `SourceKMSKeyArn` , the unzipped version of the .zip deployment package that's used for function invocations. 
For more information, see [Specifying a customer managed key for Lambda](https://docs.aws.amazon.com/lambda/latest/dg/encrypt-zip-package.html#enable-zip-custom-encryption) .\n- The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see [Function lifecycle](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html#images-lifecycle) .\n\nIf you don't provide a customer managed key, Lambda uses an [AWS owned key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) or an [AWS managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) .", "Layers": "A list of [function layers](https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html) to add to the function's execution environment. Specify each layer by its ARN, including the version.", "LoggingConfig": "The function's Amazon CloudWatch Logs configuration settings.", "MemorySize": "The amount of [memory available to the function](https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-memory-console) at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB. Note that new AWS accounts have reduced concurrency and memory quotas. AWS raises these quotas automatically based on your usage. You can also request a quota increase.", @@ -23766,7 +24220,7 @@ "S3Bucket": "An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account .", "S3Key": "The Amazon S3 key of the deployment package.", "S3ObjectVersion": "For versioned objects, the version of the deployment package object to use.", - "SourceKMSKeyArn": "", + "SourceKMSKeyArn": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an [AWS owned key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) .", "ZipFile": "(Node.js and Python) The source code of your Lambda function. If you include your function source inline with this parameter, AWS CloudFormation places it in a file named `index` and zips it to create a [deployment package](https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-package.html) . This zip file cannot exceed 4MB. For the `Handler` property, the first part of the handler identifier must be `index` . For example, `index.handler` .\n\n> When you specify source code inline for a Node.js function, the `index` file that AWS CloudFormation creates uses the extension `.js` . This means that Lambda treats the file as a CommonJS module. ES modules aren't supported for inline functions. \n\nFor JSON, you must escape quotes and special characters such as newline ( `\\n` ) with a backslash.\n\nIf you specify a function that interacts with an AWS CloudFormation custom resource, you don't have to write your own functions to send responses to the custom resource that invoked the function. AWS CloudFormation provides a response module ( [cfn-response](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html) ) that simplifies sending responses. 
See [Using AWS Lambda with AWS CloudFormation](https://docs.aws.amazon.com/lambda/latest/dg/services-cloudformation.html) for details."
 },
 "AWS::Lambda::Function DeadLetterConfig": {
@@ -25355,9 +25809,9 @@
 "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access AWS resources in your environment. For example, `arn:aws:iam::123456789:role/my-execution-role` . To learn more, see [Amazon MWAA Execution role](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) .",
 "KmsKey": "The AWS Key Management Service (KMS) key to encrypt and decrypt the data in your environment. You can use an AWS KMS key managed by MWAA, or a customer-managed KMS key (advanced).",
 "LoggingConfiguration": "The Apache Airflow logs being sent to CloudWatch Logs: `DagProcessingLogs` , `SchedulerLogs` , `TaskLogs` , `WebserverLogs` , `WorkerLogs` .",
- "MaxWebservers": "The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for `MaxWebservers` when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in `MaxWebserers` . As TPS rates decrease Amazon MWAA disposes of the additional web servers, and scales down to the number set in `MinxWebserers` .\n\nValid values: Accepts between `2` and `5` . Defaults to `2` .",
+ "MaxWebservers": "The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for `MaxWebservers` when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in `MaxWebservers` . As TPS rates decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in `MinWebservers` .\n\nValid values: For environments larger than mw1.micro, accepts values from `2` to `5` . Defaults to `2` for all environment sizes except mw1.micro, which defaults to `1` .",
 "MaxWorkers": "The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the `MaxWorkers` field. For example, `20` . When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in `MinWorkers` .",
- "MinWebservers": "The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for `MaxWebservers` when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in `MinxWebserers` .\n\nValid values: Accepts between `2` and `5` . 
Defaults to `2` .",
+ "MinWebservers": "The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for `MaxWebservers` when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in `MinWebservers` .\n\nValid values: For environments larger than mw1.micro, accepts values from `2` to `5` . Defaults to `2` for all environment sizes except mw1.micro, which defaults to `1` .",
 "MinWorkers": "The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the `MaxWorkers` field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the `MinWorkers` field. For example, `2` .",
 "Name": "The name of your Amazon MWAA environment.",
 "NetworkConfiguration": "The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see [About networking on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/networking-about.html) .",
@@ -27527,8 +27981,8 @@
 "AWS::MediaStore::Container": {
 "AccessLoggingEnabled": "The state of access logging on the container. This value is `false` by default, indicating that AWS Elemental MediaStore does not send access logs to Amazon CloudWatch Logs. When you enable access logging on the container, MediaStore changes this value to `true` , indicating that the service delivers access logs for objects stored in that container to CloudWatch Logs.",
 "ContainerName": "The name for the container. The name must be from 1 to 255 characters. Container names must be unique to your AWS account within a specific region. As an example, you could create a container named `movies` in every region, as long as you don\u2019t have an existing container with that name.",
- "CorsPolicy": "Sets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.\n\nTo enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.\n\nTo learn more about CORS, see [Cross-Origin Resource Sharing (CORS) in AWS Elemental MediaStore](https://docs.aws.amazon.com/mediastore/latest/ug/cors-policy.html) .",
- "LifecyclePolicy": "Writes an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy. 
It takes up to 20 minutes for the change to take effect.\n\nFor information about how to construct an object lifecycle policy, see [Components of an Object Lifecycle Policy](https://docs.aws.amazon.com/mediastore/latest/ug/policies-object-lifecycle-components.html) .", + "CorsPolicy": "> End of support notice: On November 13, 2025, AWS will discontinue support for AWS Elemental MediaStore. After November 13, 2025, you will no longer be able to access the AWS Elemental MediaStore console or AWS Elemental MediaStore resources. For more information, visit this [blog post](https://docs.aws.amazon.com/media/support-for-aws-elemental-mediastore-ending-soon/) . \n\nSets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.\n\nTo enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.\n\nTo learn more about CORS, see [Cross-Origin Resource Sharing (CORS) in AWS Elemental MediaStore](https://docs.aws.amazon.com/mediastore/latest/ug/cors-policy.html) .", + "LifecyclePolicy": "> End of support notice: On November 13, 2025, AWS will discontinue support for AWS Elemental MediaStore. After November 13, 2025, you will no longer be able to access the AWS Elemental MediaStore console or AWS Elemental MediaStore resources. For more information, visit this [blog post](https://docs.aws.amazon.com/media/support-for-aws-elemental-mediastore-ending-soon/) . \n\nWrites an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy. It takes up to 20 minutes for the change to take effect.\n\nFor information about how to construct an object lifecycle policy, see [Components of an Object Lifecycle Policy](https://docs.aws.amazon.com/mediastore/latest/ug/policies-object-lifecycle-components.html) .", "MetricPolicy": "The metric policy that is associated with the container. A metric policy allows AWS Elemental MediaStore to send metrics to Amazon CloudWatch. In the policy, you must indicate whether you want MediaStore to send container-level metrics. You can also include rules to define groups of objects that you want MediaStore to send object-level metrics for.\n\nTo view examples of how to construct a metric policy for your use case, see [Example Metric Policies](https://docs.aws.amazon.com/mediastore/latest/ug/policies-metric-examples.html) .", "Policy": "Creates an access policy for the specified container to restrict the users and clients that can access it. For information about the data that is included in an access policy, see the [AWS Identity and Access Management User Guide](https://docs.aws.amazon.com/iam/) .\n\nFor this release of the REST API, you can create only one policy for a container. 
If you enter `PutContainerPolicy` twice, the second command modifies the existing policy.", "Tags": "" @@ -39425,7 +39879,7 @@ "EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.\n\nFor more information, see [IAM Database Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) in the *Amazon Aurora User Guide.*\n\nValid for: Aurora DB clusters only", "EnableLocalWriteForwarding": "Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.\n\nValid for: Aurora DB clusters only", "Engine": "The name of the database engine to be used for this DB cluster.\n\nValid Values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "EngineLifecycleSupport": "The life cycle type for this DB cluster.\n\n> By default, this value is set to `open-source-rds-extended-support` , which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to `open-source-rds-extended-support-disabled` . In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date. \n\nYou can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:\n\n- Amazon Aurora (PostgreSQL only) - [Using Amazon RDS Extended Support](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/extended-support.html) in the *Amazon Aurora User Guide*\n- Amazon RDS - [Using Amazon RDS Extended Support](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html) in the *Amazon RDS User Guide*\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values: `open-source-rds-extended-support | open-source-rds-extended-support-disabled`\n\nDefault: `open-source-rds-extended-support`", + "EngineLifecycleSupport": "The life cycle type for this DB cluster.\n\n> By default, this value is set to `open-source-rds-extended-support` , which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to `open-source-rds-extended-support-disabled` . In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date. \n\nYou can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. 
For more information, see the following sections:\n\n- Amazon Aurora - [Using Amazon RDS Extended Support](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/extended-support.html) in the *Amazon Aurora User Guide*\n- Amazon RDS - [Using Amazon RDS Extended Support](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html) in the *Amazon RDS User Guide*\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values: `open-source-rds-extended-support | open-source-rds-extended-support-disabled`\n\nDefault: `open-source-rds-extended-support`", "EngineMode": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters. Aurora Serverless v2 DB clusters use the `provisioned` engine mode.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only", "EngineVersion": "The version number of the database engine to use.\n\nTo list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (8.0-compatible), use the following command:\n\n`aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"`\n\nYou can supply either `5.7` or `8.0` to use the default engine version for Aurora MySQL version 2 or version 3, respectively.\n\nTo list all of the available engine versions for Aurora PostgreSQL, use the following command:\n\n`aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"`\n\nTo list all of the available engine versions for RDS for MySQL, use the following command:\n\n`aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"`\n\nTo list all of the available engine versions for RDS for PostgreSQL, use the following command:\n\n`aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"`\n\n*Aurora MySQL*\n\nFor information, see [Database engine updates for Amazon Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) in the *Amazon Aurora User Guide* .\n\n*Aurora PostgreSQL*\n\nFor information, see [Amazon Aurora PostgreSQL releases and engine versions](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html) in the *Amazon Aurora User Guide* .\n\n*MySQL*\n\nFor information, see [Amazon RDS for MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the *Amazon RDS User Guide* .\n\n*PostgreSQL*\n\nFor information, see [Amazon RDS for PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) in the *Amazon RDS User Guide* .\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "GlobalClusterIdentifier": "If you are configuring an Aurora global database cluster and want your Aurora DB cluster to be a secondary member in the global database cluster, specify the global cluster ID of the global database cluster. 
To define the primary database cluster of the global cluster, use the [AWS::RDS::GlobalCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-globalcluster.html) resource.\n\nIf you aren't configuring a global database cluster, don't specify this property.\n\n> To remove the DB cluster from a global database cluster, specify an empty value for the `GlobalClusterIdentifier` property. \n\nFor information about Aurora global databases, see [Working with Amazon Aurora Global Databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only",
@@ -39485,7 +39939,7 @@
 },
 "AWS::RDS::DBCluster ServerlessV2ScalingConfiguration": {
 "MaxCapacity": "The maximum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 40, 40.5, 41, and so on. The largest value that you can use is 128.\n\nThe maximum capacity must be higher than 0.5 ACUs. For more information, see [Choosing the maximum Aurora Serverless v2 capacity setting for a cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.setting-capacity.html#aurora-serverless-v2.max_capacity_considerations) in the *Amazon Aurora User Guide* .\n\nAurora automatically sets certain parameters for Aurora Serverless V2 DB instances to values that depend on the maximum ACU value in the capacity range. When you update the maximum capacity value, the `ParameterApplyStatus` value for the DB instance changes to `pending-reboot` . You can update the parameter values by rebooting the DB instance after changing the capacity range.",
- "MinCapacity": "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. The smallest value that you can use is 0.5."
+ "MinCapacity": "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. For Aurora versions that support the Aurora Serverless v2 auto-pause feature, the smallest value that you can use is 0. For versions that don't support Aurora Serverless v2 auto-pause, the smallest value that you can use is 0.5."
 },
 "AWS::RDS::DBCluster Tag": {
 "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
 "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")."
@@ -39731,10 +40185,14 @@
 "EngineLifecycleSupport": "The life cycle type for this global database cluster.\n\n> By default, this value is set to `open-source-rds-extended-support` , which enrolls your global cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to `open-source-rds-extended-support-disabled` . In this case, creating the global cluster will fail if the DB major version is past its end of standard support date. \n\nThis setting only applies to Aurora PostgreSQL-based global databases.\n\nYou can use this setting to enroll your global cluster into Amazon RDS Extended Support. 
With RDS Extended Support, you can run the selected major engine version on your global cluster past the end of standard support for that engine version. For more information, see [Using Amazon RDS Extended Support](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/extended-support.html) in the *Amazon Aurora User Guide* .\n\nValid Values: `open-source-rds-extended-support | open-source-rds-extended-support-disabled`\n\nDefault: `open-source-rds-extended-support`",
 "EngineVersion": "The engine version to use for this global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster.",
 "GlobalClusterIdentifier": "The cluster identifier for this global database cluster. This parameter is stored as a lowercase string.",
+ "GlobalEndpoint": "The writer endpoint for the new global database cluster. This endpoint always points to the writer DB instance in the current primary cluster.",
 "SourceDBClusterIdentifier": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database.\n\nIf you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster:\n\n- `DatabaseName`\n- `Engine`\n- `EngineVersion`\n- `StorageEncrypted`",
 "StorageEncrypted": "Specifies whether to enable storage encryption for the new global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the setting from the source DB cluster.",
 "Tags": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* ."
 },
+ "AWS::RDS::GlobalCluster GlobalEndpoint": {
+ "Address": "The writer endpoint for the new global database cluster. This endpoint always points to the writer DB instance in the current primary cluster."
+ },
 "AWS::RDS::GlobalCluster Tag": {
 "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
 "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")." 
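A hedged CloudFormation (YAML) sketch of the Aurora Serverless v2 scaling block whose `MinCapacity` behavior is documented above. The engine version is an assumption (it must be one that supports the auto-pause feature, otherwise `0.5` remains the smallest valid `MinCapacity`), and credential handling is simplified:

    Resources:
      ServerlessCluster:
        Type: AWS::RDS::DBCluster
        Properties:
          Engine: aurora-postgresql
          EngineVersion: "16.4"            # assumed; must support Serverless v2 auto-pause
          MasterUsername: clusteradmin     # placeholder
          ManageMasterUserPassword: true   # let RDS manage the password in Secrets Manager
          ServerlessV2ScalingConfiguration:
            MinCapacity: 0                 # 0 enables auto-pause on supported versions
            MaxCapacity: 4                 # ACUs; half-step increments, up to 128

This sketch only illustrates the scaling configuration; a companion AWS::RDS::DBInstance with `DBInstanceClass: db.serverless` would still be needed for the cluster to run Serverless v2 instances.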
@@ -39818,6 +40276,32 @@
 "Key": "",
 "Value": ""
 },
+ "AWS::Rbin::Rule": {
+ "Description": "The retention rule description.",
+ "ExcludeResourceTags": "",
+ "LockConfiguration": "Information about the retention rule lock configuration.",
+ "ResourceTags": "[Tag-level retention rules only] Information about the resource tags used to identify resources that are retained by the retention rule.",
+ "ResourceType": "The resource type retained by the retention rule.",
+ "RetentionPeriod": "Information about the retention period for which the retention rule is to retain resources.",
+ "Status": "The state of the retention rule. Only retention rules that are in the `available` state retain resources.",
+ "Tags": "Information about the tags assigned to the retention rule."
+ },
+ "AWS::Rbin::Rule ResourceTag": {
+ "ResourceTagKey": "The tag key.",
+ "ResourceTagValue": "The tag value."
+ },
+ "AWS::Rbin::Rule RetentionPeriod": {
+ "RetentionPeriodUnit": "The unit of time in which the retention period is measured. Currently, only `DAYS` is supported.",
+ "RetentionPeriodValue": "The period value for which the retention rule is to retain resources. The period is measured using the unit specified for *RetentionPeriodUnit* ."
+ },
+ "AWS::Rbin::Rule Tag": {
+ "Key": "The tag key.",
+ "Value": "The tag value."
+ },
+ "AWS::Rbin::Rule UnlockDelay": {
+ "UnlockDelayUnit": "The unit of time in which to measure the unlock delay. Currently, the unlock delay can be measured only in days.",
+ "UnlockDelayValue": "The unlock delay period, measured in the unit specified for *UnlockDelayUnit* ."
+ },
 "AWS::Redshift::Cluster": {
 "AllowVersionUpgrade": "If `true` , major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster.\n\nWhen a new major version of the Amazon Redshift engine is released, you can request that the service automatically apply upgrades during the maintenance window to the Amazon Redshift engine that is running on your cluster.\n\nDefault: `true`",
 "AquaConfigurationStatus": "This parameter is retired. It does not set the AQUA configuration status. Amazon Redshift automatically determines whether to use AQUA (Advanced Query Accelerator).",
@@ -40783,8 +41267,8 @@
 "BlockOverrideDomain": "The custom DNS record to send back in response to the query. Used for the rule action `BLOCK` with a `BlockResponse` setting of `OVERRIDE` .",
 "BlockOverrideTtl": "The recommended amount of time, in seconds, for the DNS resolver or web browser to cache the provided override record. Used for the rule action `BLOCK` with a `BlockResponse` setting of `OVERRIDE` .",
 "BlockResponse": "The way that you want DNS Firewall to block the request. Used for the rule action setting `BLOCK` .\n\n- `NODATA` - Respond indicating that the query was successful, but no response is available for it.\n- `NXDOMAIN` - Respond indicating that the domain name that's in the query doesn't exist.\n- `OVERRIDE` - Provide a custom override in the response. This option requires custom handling details in the rule's `BlockOverride*` settings.",
+ "ConfidenceThreshold": "The confidence threshold for DNS Firewall Advanced. You must provide this value when you create a DNS Firewall Advanced rule. 
The confidence level values mean:\n\n- `LOW` : Provides the highest detection rate for threats, but also increases false positives.\n- `MEDIUM` : Provides a balance between detecting threats and false positives.\n- `HIGH` : Detects only the most well-corroborated threats with a low rate of false positives.",
+ "DnsThreatProtection": "The type of the DNS Firewall Advanced rule. Valid values are:\n\n- `DGA` : Domain generation algorithms detection. DGAs are used by attackers to generate a large number of domains to launch malware attacks.\n- `DNS_TUNNELING` : DNS tunneling detection. DNS tunneling is used by attackers to exfiltrate data from the client by using the DNS tunnel without making a network connection to the client.",
 "FirewallDomainListId": "The ID of the domain list that's used in the rule.",
 "FirewallDomainRedirectionAction": "How you want the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME or DNAME.\n\n`Inspect_Redirection_Domain` (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be added to the domain list.\n\n`Trust_Redirection_Domain` inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the redirection chain to the domain list.",
+ "FirewallThreatProtectionId": "ID of the DNS Firewall Advanced rule.",
 "Priority": "The priority of the rule in the rule group. This value must be unique within the rule group. DNS Firewall processes the rules in a rule group by order of priority, starting from the lowest setting.",
 "Qtype": "The DNS query type you want the rule to evaluate. Allowed values are:\n\n- A: Returns an IPv4 address.\n- AAAA: Returns an IPv6 address.\n- CAA: Restricts CAs that can create SSL/TLS certificates for the domain.\n- CNAME: Returns another domain name.\n- DS: Record that identifies the DNSSEC signing key of a delegated zone.\n- MX: Specifies mail servers.\n- NAPTR: Regular-expression-based rewriting of domain names.\n- NS: Authoritative name servers.\n- PTR: Maps an IP address to a domain name.\n- SOA: Start of authority record for the zone.\n- SPF: Lists the servers authorized to send emails from a domain.\n- SRV: Application-specific values that identify servers.\n- TXT: Verifies email senders and application-specific values.\n- A query type you define by using the DNS type ID, for example 28 for AAAA. The values must be defined as TYPE NUMBER , where the NUMBER can be 1-65334, for example, TYPE28. For more information, see [List of DNS record types](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/List_of_DNS_record_types) ."
 },
@@ -41394,11 +41881,27 @@
 "BucketEncryption": "Specifies default encryption for a bucket using server-side encryption with Amazon S3 managed keys (SSE-S3) or AWS KMS keys (SSE-KMS). For information about default encryption for directory buckets, see [Setting and monitoring default encryption for directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html) in the *Amazon S3 User Guide* .",
 "BucketName": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone. The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). 
If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.",
 "DataRedundancy": "The number of Availability Zones that are used for redundancy for the bucket.",
+ "LifecycleConfiguration": "Container for lifecycle rules. You can add as many as 1000 rules.\n\nFor more information, see [Managing your storage lifecycle](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) in the *Amazon S3 User Guide* .",
 "LocationName": "The name of the location where the bucket will be created.\n\nFor directory buckets, the name of the location is the AZ ID of the Availability Zone where the bucket will be created. An example AZ ID value is `usw2-az1` ."
 },
+ "AWS::S3Express::DirectoryBucket AbortIncompleteMultipartUpload": {
+ "DaysAfterInitiation": "Specifies the number of days after which Amazon S3 aborts an incomplete multipart upload."
+ },
 "AWS::S3Express::DirectoryBucket BucketEncryption": {
 "ServerSideEncryptionConfiguration": "Specifies the default server-side-encryption configuration."
 },
+ "AWS::S3Express::DirectoryBucket LifecycleConfiguration": {
+ "Rules": "Specifies lifecycle configuration rules for an Amazon S3 bucket."
+ },
+ "AWS::S3Express::DirectoryBucket Rule": {
+ "AbortIncompleteMultipartUpload": "",
+ "ExpirationInDays": "",
+ "Id": "Unique identifier for the rule. The value can't be longer than 255 characters.",
+ "ObjectSizeGreaterThan": "",
+ "ObjectSizeLessThan": "",
+ "Prefix": "Object key prefix that identifies one or more objects to which this rule applies.\n\n> Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see [XML related object key constraints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) .",
+ "Status": "If `Enabled` , the rule is currently being applied. If `Disabled` , the rule is not currently being applied."
+ },
 "AWS::S3Express::DirectoryBucket ServerSideEncryptionByDefault": {
 "KMSMasterKeyID": "AWS Key Management Service (KMS) customer managed key ID to use for the default encryption. This parameter is allowed only if `SSEAlgorithm` is set to `aws:kms` .\n\nYou can specify this parameter with the key ID or the Amazon Resource Name (ARN) of the KMS key. You can\u2019t use the key alias of the KMS key.\n\n- Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key ARN: `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`\n\nIf you are using encryption with cross-account or AWS service operations, you must use a fully qualified KMS key ARN. For more information, see [Using encryption for cross-account operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html#s3-express-bucket-encryption-update-bucket-policy) .\n\n> Your SSE-KMS configuration can only support 1 [customer managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) per directory bucket for the lifetime of the bucket. 
[AWS managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) ( `aws/s3` ) isn't supported. Also, after you specify a customer managed key for SSE-KMS and upload objects with this configuration, you can't override the customer managed key for your SSE-KMS configuration. To use a new customer managed key for your data, we recommend copying your existing objects to a new directory bucket with a new customer managed key. > Amazon S3 only supports symmetric encryption KMS keys. For more information, see [Asymmetric keys in AWS KMS](https://docs.aws.amazon.com//kms/latest/developerguide/symmetric-asymmetric.html) in the *AWS Key Management Service Developer Guide* .",
 "SSEAlgorithm": "Server-side encryption algorithm to use for the default encryption.\n\n> For directory buckets, there are only two supported values for server-side encryption: `AES256` and `aws:kms` ."
@@ -42597,6 +43100,7 @@
 "InstanceType": "The instance type of the instance group of a SageMaker HyperPod cluster.",
 "LifeCycleConfig": "The lifecycle configuration for a SageMaker HyperPod cluster.",
 "OnStartDeepHealthChecks": "A flag indicating whether deep health checks should be performed when the HyperPod cluster instance group is created or updated. Deep health checks are comprehensive, invasive tests that validate the health of the underlying hardware and infrastructure components.",
+ "OverrideVpcConfig": "",
 "ThreadsPerCore": "The number of threads per CPU core you specified under `CreateCluster` ."
 },
 "AWS::SageMaker::Cluster ClusterInstanceStorageConfig": {
@@ -45429,6 +45933,7 @@
 "ExecutionRoleArn": "The ARN of the IAM role to be used to run the canary. This role must already exist, and must include `lambda.amazonaws.com` as a principal in the trust policy. The role must also have the following permissions:\n\n- `s3:PutObject`\n- `s3:GetBucketLocation`\n- `s3:ListAllMyBuckets`\n- `cloudwatch:PutMetricData`\n- `logs:CreateLogGroup`\n- `logs:CreateLogStream`\n- `logs:PutLogEvents`",
 "FailureRetentionPeriod": "The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.",
 "Name": "The name for this canary. Be sure to give it a descriptive name that distinguishes it from other canaries in your account.\n\nDo not include secrets or proprietary information in your canary names. The canary name makes up part of the canary ARN, and the ARN is included in outbound calls over the internet. For more information, see [Security Considerations for Synthetics Canaries](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html) .",
+ "ProvisionedResourceCleanup": "",
 "ResourcesToReplicateTags": "To have the tags that you apply to this canary also be applied to the Lambda function that the canary uses, specify this property with the value `lambda-function` . If you do this, CloudWatch Synthetics will keep the tags of the canary and the Lambda function synchronized. Any future changes you make to the canary's tags will also be applied to the function.",
 "RunConfig": "A structure that contains input information for a canary run. If you omit this structure, the frequency of the canary is used as the canary's timeout value, up to a maximum of 900 seconds.",
 "RuntimeVersion": "Specifies the runtime version to use for the canary. 
For more information about runtime versions, see [Canary Runtime Versions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html) .", @@ -45950,6 +46455,7 @@ "AWS::VpcLattice::AccessLogSubscription": { "DestinationArn": "The Amazon Resource Name (ARN) of the destination. The supported destination types are CloudWatch Log groups, Kinesis Data Firehose delivery streams, and Amazon S3 buckets.", "ResourceIdentifier": "The ID or Amazon Resource Name (ARN) of the service network or service.", + "ServiceNetworkLogType": "", "Tags": "The tags for the access log subscription." }, "AWS::VpcLattice::AccessLogSubscription Tag": { @@ -46062,8 +46568,12 @@ "AWS::VpcLattice::ServiceNetwork": { "AuthType": "The type of IAM policy.\n\n- `NONE` : The resource does not use an IAM policy. This is the default.\n- `AWS_IAM` : The resource uses an IAM policy. When this type is used, auth is enabled and an auth policy is required.", "Name": "The name of the service network. The name must be unique to the account. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen.\n\nIf you don't specify a name, CloudFormation generates one. However, if you specify a name, and later want to replace the resource, you must specify a new name.", + "SharingConfig": "", "Tags": "The tags for the service network." }, + "AWS::VpcLattice::ServiceNetwork SharingConfig": { + "enabled": "" + }, "AWS::VpcLattice::ServiceNetwork Tag": { "Key": "The tag key.", "Value": "The tag value." @@ -47022,49 +47532,49 @@ "Type": "The type of the AI Agent." }, "AWS::Wisdom::AIAgent AIAgentConfiguration": { - "AnswerRecommendationAIAgentConfiguration": "", - "ManualSearchAIAgentConfiguration": "" + "AnswerRecommendationAIAgentConfiguration": "The configuration for AI Agents of type `ANSWER_RECOMMENDATION` .", + "ManualSearchAIAgentConfiguration": "The configuration for AI Agents of type `MANUAL_SEARCH` ." }, "AWS::Wisdom::AIAgent AnswerRecommendationAIAgentConfiguration": { - "AnswerGenerationAIPromptId": "", - "AssociationConfigurations": "", - "IntentLabelingGenerationAIPromptId": "", - "QueryReformulationAIPromptId": "" + "AnswerGenerationAIPromptId": "The AI Prompt identifier for the Answer Generation prompt used by the `ANSWER_RECOMMENDATION` AI Agent.", + "AssociationConfigurations": "The association configurations for overriding behavior on this AI Agent.", + "IntentLabelingGenerationAIPromptId": "The AI Prompt identifier for the Intent Labeling prompt used by the `ANSWER_RECOMMENDATION` AI Agent.", + "QueryReformulationAIPromptId": "The AI Prompt identifier for the Query Reformulation prompt used by the `ANSWER_RECOMMENDATION` AI Agent." }, "AWS::Wisdom::AIAgent AssociationConfiguration": { - "AssociationConfigurationData": "", - "AssociationId": "", - "AssociationType": "" + "AssociationConfigurationData": "A typed union of the data of the configuration for an Amazon Q in Connect Assistant Association.", + "AssociationId": "The identifier of the association for this Association Configuration.", + "AssociationType": "The type of the association for this Association Configuration." }, "AWS::Wisdom::AIAgent AssociationConfigurationData": { - "KnowledgeBaseAssociationConfigurationData": "" + "KnowledgeBaseAssociationConfigurationData": "The data of the configuration for a `KNOWLEDGE_BASE` type Amazon Q in Connect Assistant Association." 
},
 "AWS::Wisdom::AIAgent KnowledgeBaseAssociationConfigurationData": {
- "ContentTagFilter": "",
- "MaxResults": "",
+ "ContentTagFilter": "An object that can be used to specify Tag conditions.",
+ "MaxResults": "The maximum number of results to return per page.",
 "OverrideKnowledgeBaseSearchType": ""
 },
 "AWS::Wisdom::AIAgent ManualSearchAIAgentConfiguration": {
- "AnswerGenerationAIPromptId": "",
- "AssociationConfigurations": ""
+ "AnswerGenerationAIPromptId": "The AI Prompt identifier for the Answer Generation prompt used by the `MANUAL_SEARCH` AI Agent.",
+ "AssociationConfigurations": "The association configurations for overriding behavior on this AI Agent."
 },
 "AWS::Wisdom::AIAgent OrCondition": {
 "AndConditions": "",
- "TagCondition": ""
+ "TagCondition": "A leaf node condition which can be used to specify a tag condition."
 },
 "AWS::Wisdom::AIAgent TagCondition": {
- "Key": "",
- "Value": ""
+ "Key": "The tag key in the tag condition.",
+ "Value": "The tag value in the tag condition."
 },
 "AWS::Wisdom::AIAgent TagFilter": {
- "AndConditions": "",
- "OrConditions": "",
- "TagCondition": ""
+ "AndConditions": "A list of conditions which would be applied together with an `AND` condition.",
+ "OrConditions": "A list of conditions which would be applied together with an `OR` condition.",
+ "TagCondition": "A leaf node condition which can be used to specify a tag condition."
 },
 "AWS::Wisdom::AIAgentVersion": {
- "AIAgentId": "",
+ "AIAgentId": "The identifier of the AI Agent.",
 "AssistantId": "",
- "ModifiedTimeSeconds": ""
+ "ModifiedTimeSeconds": "The time the AI Agent version was last modified in seconds."
 },
 "AWS::Wisdom::AIPrompt": {
 "ApiFormat": "The API format used for this AI Prompt.",
@@ -47078,15 +47588,15 @@
 "Type": "The type of this AI Prompt."
 },
 "AWS::Wisdom::AIPrompt AIPromptTemplateConfiguration": {
- "TextFullAIPromptEditTemplateConfiguration": ""
+ "TextFullAIPromptEditTemplateConfiguration": "The configuration for a prompt template that supports full textual prompt configuration using a YAML prompt."
 },
 "AWS::Wisdom::AIPrompt TextFullAIPromptEditTemplateConfiguration": {
- "Text": ""
+ "Text": "The YAML text for the AI Prompt template."
 },
 "AWS::Wisdom::AIPromptVersion": {
- "AIPromptId": "",
- "AssistantId": "",
- "ModifiedTimeSeconds": ""
+ "AIPromptId": "The identifier of the Amazon Q in Connect AI prompt.",
+ "AssistantId": "The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.",
+ "ModifiedTimeSeconds": "The time the AI Prompt version was last modified in seconds." 
}, "AWS::Wisdom::Assistant": { "Description": "The description of the assistant.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 50de7c1e8..1a7efe012 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -1718,7 +1718,7 @@ "properties": { "AnalyzerConfiguration": { "$ref": "#/definitions/AWS::AccessAnalyzer::Analyzer.AnalyzerConfiguration", - "markdownDescription": "Contains information about the configuration of an unused access analyzer for an AWS organization or account.", + "markdownDescription": "Contains information about the configuration of an analyzer for an AWS organization or account.", "title": "AnalyzerConfiguration" }, "AnalyzerName": { @@ -1738,7 +1738,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to the analyzer.", + "markdownDescription": "An array of key-value pairs to apply to the analyzer. You can use the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .\n\nFor the tag key, you can specify a value that is 1 to 128 characters in length and cannot be prefixed with `aws:` .\n\nFor the tag value, you can specify a value that is 0 to 256 characters in length.", "title": "Tags", "type": "array" }, @@ -1779,7 +1779,7 @@ "properties": { "UnusedAccessConfiguration": { "$ref": "#/definitions/AWS::AccessAnalyzer::Analyzer.UnusedAccessConfiguration", - "markdownDescription": "Specifies the configuration of an unused access analyzer for an AWS organization or account. External access analyzers do not support any configuration.", + "markdownDescription": "Specifies the configuration of an unused access analyzer for an AWS organization or account.", "title": "UnusedAccessConfiguration" } }, @@ -1855,7 +1855,7 @@ "additionalProperties": false, "properties": { "UnusedAccessAge": { - "markdownDescription": "The specified access age in days for which to generate findings for unused access. For example, if you specify 90 days, the analyzer will generate findings for IAM entities within the accounts of the selected organization for any access that hasn't been used in 90 or more days since the analyzer's last scan. You can choose a value between 1 and 180 days.", + "markdownDescription": "The specified access age in days for which to generate findings for unused access. For example, if you specify 90 days, the analyzer will generate findings for IAM entities within the accounts of the selected organization for any access that hasn't been used in 90 or more days since the analyzer's last scan. You can choose a value between 1 and 365 days.", "title": "UnusedAccessAge", "type": "number" } @@ -27262,7 +27262,7 @@ "type": "string" }, "Version": { - "markdownDescription": "The version number of the launch template, `$Latest` , or `$Default` .\n\nIf the value is `$Latest` , the latest version of the launch template is used. If the value is `$Default` , the default version of the launch template is used.\n\n> If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. It's only changed if the `updateToLatestImageVersion` parameter for the compute environment is set to `true` . During an infrastructure update, if either `$Latest` or `$Default` is specified, AWS Batch re-evaluates the launch template version, and it might use a different version of the launch template. 
This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . \n\nDefault: `$Default` .", + "markdownDescription": "The version number of the launch template, `$Default` , or `$Latest` .\n\nIf the value is `$Default` , the default version of the launch template is used. If the value is `$Latest` , the latest version of the launch template is used.\n\n> If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. It's only changed if the `updateToLatestImageVersion` parameter for the compute environment is set to `true` . During an infrastructure update, if either `$Default` or `$Latest` is specified, AWS Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . \n\nDefault: `$Default`\n\nLatest: `$Latest`", "title": "Version", "type": "string" } @@ -34396,7 +34396,7 @@ "additionalProperties": false, "properties": { "TypeName": { - "markdownDescription": "The name of the hook.\n\nYou must specify either `TypeVersionArn` , or `TypeName` and `VersionId` .", + "markdownDescription": "The name of the Hook.\n\nYou must specify either `TypeVersionArn` , or `TypeName` and `VersionId` .", "title": "TypeName", "type": "string" }, @@ -34469,22 +34469,22 @@ "additionalProperties": false, "properties": { "Configuration": { - "markdownDescription": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", + "markdownDescription": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "Configuration", "type": "string" }, "ConfigurationAlias": { - "markdownDescription": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. Hook types currently support default configuration alias.", + "markdownDescription": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. Hook types currently support default configuration alias.", "title": "ConfigurationAlias", "type": "string" }, "TypeArn": { - "markdownDescription": "The Amazon Resource Number (ARN) for the hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", + "markdownDescription": "The Amazon Resource Number (ARN) for the Hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "TypeArn", "type": "string" }, "TypeName": { - "markdownDescription": "The unique name for your hook. 
Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", + "markdownDescription": "The unique name for your Hook. Specifies a three-part namespace for your Hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "TypeName", "type": "string" } @@ -34551,7 +34551,7 @@ "additionalProperties": false, "properties": { "ExecutionRoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the task execution role that grants the hook permission.", + "markdownDescription": "The Amazon Resource Name (ARN) of the task execution role that grants the Hook permission.", "title": "ExecutionRoleArn", "type": "string" }, @@ -34561,7 +34561,7 @@ "title": "LoggingConfig" }, "SchemaHandlerPackage": { - "markdownDescription": "A URL to the Amazon S3 bucket containing the hook project package that contains the necessary files for the hook you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide for Extension Development* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That's, the user must have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "A URL to the Amazon S3 bucket containing the Hook project package that contains the necessary files for the Hook you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide for Extension Development* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That is, the user must have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package.
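The Hook descriptions above pair naturally: `HookVersion` registers the handler package, and `HookTypeConfig` activates a configuration for it. A hedged sketch, assuming a hypothetical hook named `MyOrg::Compliance::Hook`; the bucket, package key, role ARN, and configuration body are placeholders.

```yaml
# Hypothetical Hook registration; all names and ARNs are placeholders.
MyHookVersion:
  Type: AWS::CloudFormation::HookVersion
  Properties:
    TypeName: MyOrg::Compliance::Hook       # three-part Organization::Service::Hook namespace
    SchemaHandlerPackage: s3://amzn-s3-demo-bucket/my-hook-package.zip
    ExecutionRoleArn: arn:aws:iam::123456789012:role/my-hook-execution-role

MyHookTypeConfig:
  Type: AWS::CloudFormation::HookTypeConfig
  Properties:
    TypeArn: !GetAtt MyHookVersion.TypeArn  # either TypeArn or TypeName, plus Configuration
    Configuration: '{"CloudFormationConfiguration":{"HookConfiguration":{"TargetStacks":"ALL","FailureMode":"FAIL"}}}'
```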
For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", "title": "SchemaHandlerPackage", "type": "string" }, @@ -37062,7 +37062,7 @@ "additionalProperties": false, "properties": { "Bucket": { - "markdownDescription": "The Amazon S3 bucket to store the access logs in, for example, `myawslogbucket.s3.amazonaws.com` .", + "markdownDescription": "The Amazon S3 bucket to store the access logs in, for example, `amzn-s3-demo-bucket.s3.amazonaws.com` .", "title": "Bucket", "type": "string" }, @@ -38864,7 +38864,7 @@ "additionalProperties": false, "properties": { "Bucket": { - "markdownDescription": "The Amazon S3 bucket to store the access logs in, for example, `myawslogbucket.s3.amazonaws.com` .", + "markdownDescription": "The Amazon S3 bucket to store the access logs in, for example, `amzn-s3-demo-bucket.s3.amazonaws.com` .", "title": "Bucket", "type": "string" }, @@ -39261,7 +39261,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . 
The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . 
`errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -39584,7 +39584,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. 
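As a concrete reading of the selector rules above, the following hedged sketch shows an event data store that logs only S3 object writes for one bucket; the bucket ARN is a placeholder.

```yaml
# Hypothetical AWS::CloudTrail::EventDataStore using the field selectors
# described above; the bucket ARN is a placeholder.
S3WriteEventDataStore:
  Type: AWS::CloudTrail::EventDataStore
  Properties:
    AdvancedEventSelectors:
      - Name: Log S3 object writes only
        FieldSelectors:
          - Field: eventCategory       # required; must use Equals
            Equals: [Data]
          - Field: resources.type      # required for data events; Equals only
            Equals: [AWS::S3::Object]
          - Field: readOnly            # 'false' logs only write events
            Equals: ['false']
          - Field: resources.ARN       # StartsWith the bucket ARN matches all its objects
            StartsWith: ['arn:aws:s3:::amzn-s3-demo-bucket/']
```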
This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. 
To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. 
You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . 
For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -41208,7 +41208,7 @@ "type": "string" }, "EnvironmentType": { - "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `MAC_ARM` is available only in regions US East (Ohio), US East (N. Virginia), US West (Oregon), Europe (Frankfurt), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `ARM_EC2` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_EC2` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `MAC_ARM` is available only in regions US East (Ohio), US East (N. Virginia), US West (Oregon), Europe (Frankfurt), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_EC2` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "title": "EnvironmentType", "type": "string" }, @@ -41910,7 +41910,7 @@ "type": "string" }, "ReportBuildStatus": { - "markdownDescription": "Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an `invalidInputException` is thrown.", + "markdownDescription": "Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket. 
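A hedged sketch of how the `EnvironmentType` and `ReportBuildStatus` rules above fit into a project definition; the project name, repository URL, and role ARN are placeholders.

```yaml
# Hypothetical AWS::CodeBuild::Project; names, location, and role are placeholders.
MyProject:
  Type: AWS::CodeBuild::Project
  Properties:
    Name: my-project
    ServiceRole: arn:aws:iam::123456789012:role/my-codebuild-role
    Artifacts:
      Type: NO_ARTIFACTS
    Environment:
      Type: ARM_CONTAINER              # must be offered in the stack's region (see list above)
      ComputeType: BUILD_GENERAL1_SMALL
      Image: aws/codebuild/amazonlinux2-aarch64-standard:3.0
    Source:
      Type: GITHUB                     # ReportBuildStatus is valid only for GitHub, GitHub
      Location: https://github.com/example/example-repo.git
      ReportBuildStatus: true          # Enterprise, GitLab, GitLab Self Managed, or Bitbucket
```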
If this is set and you use a different source provider, an `invalidInputException` is thrown.", "title": "ReportBuildStatus", "type": "boolean" }, @@ -63656,22 +63656,22 @@ "additionalProperties": false, "properties": { "Content": { - "markdownDescription": "The content of the metadata form.", + "markdownDescription": "", "title": "Content", "type": "string" }, "FormName": { - "markdownDescription": "The name of the metadata form.", + "markdownDescription": "", "title": "FormName", "type": "string" }, "TypeIdentifier": { - "markdownDescription": "The ID of the metadata form type.", + "markdownDescription": "", "title": "TypeIdentifier", "type": "string" }, "TypeRevision": { - "markdownDescription": "The revision of the metadata form type.", + "markdownDescription": "", "title": "TypeRevision", "type": "string" } @@ -64746,12 +64746,12 @@ "additionalProperties": false, "properties": { "Max": { - "markdownDescription": "The maximum GPU for the accelerator.", + "markdownDescription": "The maximum number of GPU accelerators in the worker host.", "title": "Max", "type": "number" }, "Min": { - "markdownDescription": "The minimum GPU for the accelerator.", + "markdownDescription": "The minimum number of GPU accelerators in the worker host.", "title": "Min", "type": "number" } @@ -67906,7 +67906,7 @@ }, "ImportSourceSpecification": { "$ref": "#/definitions/AWS::DynamoDB::Table.ImportSourceSpecification", - "markdownDescription": "Specifies the properties of data being imported from the S3 bucket source to the table.\n\n> If you specify the `ImportSourceSpecification` property, and also specify either the `StreamSpecification` , the `TableClass` property, or the `DeletionProtectionEnabled` property, the IAM entity creating/updating stack must have `UpdateTable` permission.", + "markdownDescription": "Specifies the properties of data being imported from the S3 bucket source to the table.\n\n> If you specify the `ImportSourceSpecification` property, and also specify either the `StreamSpecification` , the `TableClass` property, the `DeletionProtectionEnabled` property, or the `WarmThroughput` property, the IAM entity creating/updating stack must have `UpdateTable` permission.", "title": "ImportSourceSpecification" }, "KeySchema": { @@ -76304,7 +76304,7 @@ "type": "array" }, "MaxEntries": { - "markdownDescription": "The maximum number of entries for the prefix list.", + "markdownDescription": "The maximum number of entries for the prefix list. You can't modify the entries and the size of a prefix list at the same time.\n\nThis property is required when you create a prefix list.", "title": "MaxEntries", "type": "number" }, @@ -83548,7 +83548,7 @@ "type": "boolean" }, "HealthCheckGracePeriodSeconds": { - "markdownDescription": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of `0` is used.\n\nIf you do not use an Elastic Load Balancing, we recommend that you use the `startPeriod` in the task definition health check parameters.
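The `MaxEntries` note above (required at creation, and not modifiable together with the entries) implies a template like this hedged sketch; the list name and CIDR are placeholders.

```yaml
# Hypothetical AWS::EC2::PrefixList; MaxEntries must be set at creation, and a
# later update may change the entries or MaxEntries, but not both at once.
OfficePrefixList:
  Type: AWS::EC2::PrefixList
  Properties:
    PrefixListName: office-cidrs
    AddressFamily: IPv4
    MaxEntries: 10
    Entries:
      - Cidr: 203.0.113.0/24
        Description: example office range
```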
For more information, see [Health check](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HealthCheck.html) .\n\nIf your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.", + "markdownDescription": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container health checks after a task has first started. If you don't specify a health check grace period value, the default value of `0` is used. If you don't use any of the health checks, then `healthCheckGracePeriodSeconds` is unused.\n\nIf your service's tasks take a while to start and respond to health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.", "title": "HealthCheckGracePeriodSeconds", "type": "number" }, @@ -85103,7 +85103,7 @@ "type": "number" }, "Name": { - "markdownDescription": "The name that's used for the port mapping. This parameter only applies to Service Connect. This parameter is the name that you use in the `serviceConnectConfiguration` of a service. The name can include up to 64 characters. The characters can include lowercase letters, numbers, underscores (_), and hyphens (-). The name can't start with a hyphen.\n\nFor more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The name that's used for the port mapping. This parameter is the name that you use in the `serviceConnectConfiguration` and the `vpcLatticeConfigurations` of a service. The name can include up to 64 characters. The characters can include lowercase letters, numbers, underscores (_), and hyphens (-). The name can't start with a hyphen.", "title": "Name", "type": "string" }, @@ -102923,8 +102923,6 @@ "items": { "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerDefinition" }, - "markdownDescription": "The set of container definitions that are included in the container group.", - "title": "ContainerDefinitions", "type": "array" }, "Name": { @@ -102933,13 +102931,11 @@ "type": "string" }, "OperatingSystem": { - "markdownDescription": "The platform required for all containers in the container group definition.\n\n> Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the [Amazon Linux 2 FAQs](https://docs.aws.amazon.com/https://aws.amazon.com/amazon-linux-2/faqs/) . For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See [Migrate to Amazon GameLift server SDK version 5.](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-serversdk5-migration.html)", + "markdownDescription": "The platform that all containers in the container group definition run on.\n\n> Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. 
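The two ECS descriptions above interact: the port mapping `Name` is what `serviceConnectConfiguration` refers back to, and the grace period suppresses health-check-driven restarts while slow tasks warm up. A hedged sketch with placeholder names:

```yaml
# Hypothetical AWS::ECS::Service; cluster and task definition are placeholders.
MyService:
  Type: AWS::ECS::Service
  Properties:
    Cluster: !Ref MyCluster
    TaskDefinition: !Ref MyTaskDefinition
    DesiredCount: 2
    HealthCheckGracePeriodSeconds: 120   # scheduler ignores failing health checks for 120s after start
    ServiceConnectConfiguration:
      Enabled: true
      Services:
        - PortName: web                  # must match a port mapping Name in the task definition
          DiscoveryName: web
```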
See more details in the [Amazon Linux 2 FAQs](https://aws.amazon.com/amazon-linux-2/faqs/) . For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See [Migrate to Amazon GameLift server SDK version 5.](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-serversdk5-migration.html)", "title": "OperatingSystem", "type": "string" }, "SchedulingStrategy": { - "markdownDescription": "The method for deploying the container group across fleet instances. A replica container group might have multiple copies on each fleet instance. A daemon container group maintains only one copy per fleet instance.", - "title": "SchedulingStrategy", "type": "string" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, @@ -102951,13 +102947,9 @@ "type": "array" }, "TotalCpuLimit": { - "markdownDescription": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.", - "title": "TotalCpuLimit", "type": "number" }, "TotalMemoryLimit": { - "markdownDescription": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group.", - "title": "TotalMemoryLimit", "type": "number" } }, @@ -102998,77 +102990,51 @@ "items": { "type": "string" }, - "markdownDescription": "A command that's passed to the container on startup. Each argument for the command is an additional string in the array. See the [ContainerDefinition::command](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-command) parameter in the *Amazon Elastic Container Service API reference.*", - "title": "Command", "type": "array" }, "ContainerName": { - "markdownDescription": "The container definition identifier. Container names are unique within a container group definition.", - "title": "ContainerName", "type": "string" }, "Cpu": { - "markdownDescription": "The number of CPU units that are reserved for the container. Note: 1 vCPU unit equals 1024 CPU units. If no resources are reserved, the container shares the total CPU limit for the container group.\n\n*Related data type:* `ContainerGroupDefinition$TotalCpuLimit`", - "title": "Cpu", "type": "number" }, "DependsOn": { "items": { "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerDependency" }, - "markdownDescription": "Indicates that the container relies on the status of other containers in the same container group during its startup and shutdown sequences. A container might have dependencies on multiple containers.", - "title": "DependsOn", "type": "array" }, "EntryPoint": { "items": { "type": "string" }, - "markdownDescription": "The entry point that's passed to the container on startup.
If there are multiple arguments, each argument is an additional string in the array. See the [ContainerDefinition::entryPoint](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-entryPoint) parameter in the *Amazon Elastic Container Service API Reference* .", - "title": "EntryPoint", "type": "array" }, "Environment": { "items": { "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerEnvironment" }, - "markdownDescription": "A set of environment variables that's passed to the container on startup. See the [ContainerDefinition::environment](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-environment) parameter in the *Amazon Elastic Container Service API Reference* .", - "title": "Environment", "type": "array" }, "Essential": { - "markdownDescription": "Indicates whether the container is vital to the container group. If an essential container fails, the entire container group is restarted.", - "title": "Essential", "type": "boolean" }, "HealthCheck": { - "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerHealthCheck", - "markdownDescription": "A configuration for a non-terminal health check. A container, which automatically restarts if it stops functioning, also restarts if it fails this health check. If an essential container in the daemon group fails a health check, the entire container group is restarted. The essential container in the replica group doesn't use this health check mechanism, because the Amazon GameLift Agent automatically handles the task.", - "title": "HealthCheck" + "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.ContainerHealthCheck" }, "ImageUri": { - "markdownDescription": "The URI to the image that $short; copied and deployed to a container fleet. For a more specific identifier, see `ResolvedImageDigest` .", - "title": "ImageUri", "type": "string" }, "MemoryLimits": { - "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.MemoryLimits", - "markdownDescription": "The amount of memory that Amazon GameLift makes available to the container. If memory limits aren't set for an individual container, the container shares the container group's total memory allocation.\n\n*Related data type:* `ContainerGroupDefinition$TotalMemoryLimit`", - "title": "MemoryLimits" + "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.MemoryLimits" }, "PortConfiguration": { - "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.PortConfiguration", - "markdownDescription": "Defines the ports that are available to assign to processes in the container. For example, a game server process requires a container port to allow game clients to connect to it. Container ports aren't directly accessed by inbound traffic. Amazon GameLift maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's `ConnectionPortRange` .", - "title": "PortConfiguration" + "$ref": "#/definitions/AWS::GameLift::ContainerGroupDefinition.PortConfiguration" }, "ResolvedImageDigest": { - "markdownDescription": "A unique and immutable identifier for the container image that is deployed to a container fleet. The digest is a SHA 256 hash of the container image manifest.", - "title": "ResolvedImageDigest", "type": "string" }, "WorkingDirectory": { - "markdownDescription": "The directory in the container where commands are run. 
See the [ContainerDefinition::workingDirectory](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-workingDirectory) parameter in the *Amazon Elastic Container Service API Reference* .", - "title": "WorkingDirectory", "type": "string" } }, @@ -103135,7 +103101,7 @@ "type": "number" }, "Retries": { - "markdownDescription": "The number of times to retry a failed health check before the container is considered unhealthy. The first run of the command does not count as a retry.", + "markdownDescription": "The number of times to retry a failed health check before flagging the container unhealthy. The first run of the command does not count as a retry.", "title": "Retries", "type": "number" }, @@ -103145,7 +103111,7 @@ "type": "number" }, "Timeout": { - "markdownDescription": "The time period (in seconds) to wait for a health check to succeed before a failed health check is counted.", + "markdownDescription": "The time period (in seconds) to wait for a health check to succeed before counting a failed health check.", "title": "Timeout", "type": "number" } @@ -103185,13 +103151,9 @@ "additionalProperties": false, "properties": { "HardLimit": { - "markdownDescription": "", - "title": "HardLimit", "type": "number" }, "SoftLimit": { - "markdownDescription": "", - "title": "SoftLimit", "type": "number" } }, @@ -103255,7 +103217,7 @@ "title": "AnywhereConfiguration" }, "ApplyCapacity": { - "markdownDescription": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "markdownDescription": "Current resource capacity settings for managed EC2 fleets and managed container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "title": "ApplyCapacity", "type": "string" }, @@ -103270,14 +103232,12 @@ "title": "CertificateConfiguration" }, "ComputeType": { - "markdownDescription": "The type of compute resource used to host your game servers.\n\n- `EC2` \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.\n- `CONTAINER` \u2013 Container images with your game server build and supporting software are deployed to Amazon EC2 instances for cloud hosting. With this compute type, you must specify the `ContainerGroupsConfiguration` parameter.\n- `ANYWHERE` \u2013 Game servers or container images with your game server and supporting software are deployed to compute resources that are provided and managed by you. 
With this compute type, you can also set the `AnywhereConfiguration` parameter.", + "markdownDescription": "The type of compute resource used to host your game servers.\n\n- `EC2` \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.\n- `ANYWHERE` \u2013 Game servers and supporting software are deployed to compute resources that you provide and manage. With this compute type, you can also set the `AnywhereConfiguration` parameter.", "title": "ComputeType", "type": "string" }, "ContainerGroupsConfiguration": { - "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsConfiguration", - "markdownDescription": "*This data type is currently not available. It is under improvement as we respond to customer feedback from the Containers public preview.*\n\nConfiguration details for a set of container groups, for use when creating a fleet with compute type `CONTAINER` .\n\n*Used with:* `CreateFleet`", - "title": "ContainerGroupsConfiguration" + "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsConfiguration" }, "Description": { "markdownDescription": "A description for the fleet.", @@ -103293,12 +103253,12 @@ "items": { "$ref": "#/definitions/AWS::GameLift::Fleet.IpPermission" }, - "markdownDescription": "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for EC2 and container fleets. You can leave this parameter empty when creating the fleet, but you must call `UpdateFleetPortSettings` to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.\n\nTo manage inbound access for a container fleet, set this parameter to the same port numbers that you set for the fleet's connection port range. During the life of the fleet, update this parameter to control which connection ports are open to inbound traffic.", + "markdownDescription": "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call [](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings) to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.", "title": "EC2InboundPermissions", "type": "array" }, "EC2InstanceType": { - "markdownDescription": "The Amazon GameLift-supported Amazon EC2 instance type to use with EC2 and container fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See [Amazon Elastic Compute Cloud Instance Types](https://docs.aws.amazon.com/ec2/instance-types/) for detailed descriptions of Amazon EC2 instance types.", + "markdownDescription": "The Amazon GameLift-supported Amazon EC2 instance type to use with managed EC2 fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. 
See [Amazon Elastic Compute Cloud Instance Types](https://docs.aws.amazon.com/ec2/instance-types/) for detailed descriptions of Amazon EC2 instance types.", "title": "EC2InstanceType", "type": "string" }, @@ -103308,12 +103268,12 @@ "type": "string" }, "InstanceRoleARN": { - "markdownDescription": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", + "markdownDescription": "A unique identifier for an IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the [IAM dashboard](https://docs.aws.amazon.com/iam/) in the AWS Management Console . Learn more about using on-box credentials for your game servers at [Access external resources from a game server](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is `EC2` .", "title": "InstanceRoleARN", "type": "string" }, "InstanceRoleCredentialsProvider": { - "markdownDescription": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", + "markdownDescription": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is `EC2` .", "title": "InstanceRoleCredentialsProvider", "type": "string" }, @@ -103445,13 +103405,9 @@ "additionalProperties": false, "properties": { "FromPort": { - "markdownDescription": "Starting value for the port range.", - "title": "FromPort", "type": "number" }, "ToPort": { - "markdownDescription": "Ending value for the port. Port numbers are end-inclusive.
This value must be equal to or greater than `FromPort` .", - "title": "ToPort", "type": "number" } }, @@ -103465,22 +103421,16 @@ "additionalProperties": false, "properties": { "ConnectionPortRange": { - "$ref": "#/definitions/AWS::GameLift::Fleet.ConnectionPortRange", - "markdownDescription": "A set of ports to allow inbound traffic, including game clients, to connect to processes running in the container fleet.\n\nConnection ports are dynamically mapped to container ports, which are assigned to individual processes running in a container. The connection port range must have enough ports to map to all container ports across a fleet instance. To calculate the minimum connection ports needed, use the following formula:\n\n*[Total number of container ports as defined for containers in the replica container group] * [Desired or calculated number of replica container groups per instance] + [Total number of container ports as defined for containers in the daemon container group]*\n\nAs a best practice, double the minimum number of connection ports.\n\n> Use the fleet's `EC2InboundPermissions` property to control external access to connection ports. Set this property to the connection port numbers that you want to open access to. See `IpPermission` for more details.", - "title": "ConnectionPortRange" + "$ref": "#/definitions/AWS::GameLift::Fleet.ConnectionPortRange" }, "ContainerGroupDefinitionNames": { "items": { "type": "string" }, - "markdownDescription": "The list of container group definition names to deploy to a new container fleet.", - "title": "ContainerGroupDefinitionNames", "type": "array" }, "ContainerGroupsPerInstance": { - "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsPerInstance", - "markdownDescription": "", - "title": "ContainerGroupsPerInstance" + "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsPerInstance" } }, "required": [ @@ -103493,13 +103443,9 @@ "additionalProperties": false, "properties": { "DesiredReplicaContainerGroupsPerInstance": { - "markdownDescription": "The desired number of replica container groups to place on each fleet instance.", - "title": "DesiredReplicaContainerGroupsPerInstance", "type": "number" }, "MaxReplicaContainerGroupsPerInstance": { - "markdownDescription": "The maximum possible number of replica container groups that each fleet instance can have.", - "title": "MaxReplicaContainerGroupsPerInstance", "type": "number" } }, @@ -103573,7 +103519,7 @@ }, "LocationCapacity": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationCapacity", - "markdownDescription": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "markdownDescription": "Current resource capacity settings for managed EC2 fleets and managed container fleets.
For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "title": "LocationCapacity" } }, @@ -103607,7 +103553,7 @@ "type": "number" }, "MaxConcurrentGameSessionActivations": { - "markdownDescription": "The number of game sessions in status `ACTIVATING` to allow on an instance. This setting limits the instance resources that can be used for new game activations at any one time.", + "markdownDescription": "The number of game sessions in status `ACTIVATING` to allow on an instance or compute. This setting limits the instance resources that can be used for new game activations at any one time.", "title": "MaxConcurrentGameSessionActivations", "type": "number" }, @@ -103696,7 +103642,7 @@ "additionalProperties": false, "properties": { "ConcurrentExecutions": { - "markdownDescription": "The number of server processes using this configuration that run concurrently on each instance.", + "markdownDescription": "The number of server processes using this configuration that run concurrently on each instance or compute.", "title": "ConcurrentExecutions", "type": "number" }, @@ -104004,7 +103950,7 @@ "items": { "$ref": "#/definitions/AWS::GameLift::GameSessionQueue.PlayerLatencyPolicy" }, - "markdownDescription": "A set of policies that act as a sliding cap on player latency. FleetIQ works to deliver low latency for most players in a game session. These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value.", + "markdownDescription": "A set of policies that enforce a sliding cap on player latency when processing game session placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value.", "title": "PlayerLatencyPolicies", "type": "array" }, @@ -104022,7 +103968,7 @@ "type": "array" }, "TimeoutInSeconds": { - "markdownDescription": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a `TIMED_OUT` status. By default, this property is set to `600` .", + "markdownDescription": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a `TIMED_OUT` status.", "title": "TimeoutInSeconds", "type": "number" } @@ -104237,7 +104183,7 @@ "type": "number" }, "AdditionalPlayerCount": { - "markdownDescription": "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 10-person team, and the additional player count is set to 2, 10 players will be selected for the match and 2 more player slots will be open for future players.
This parameter is not used if `FlexMatchMode` is set to `STANDALONE` .", + "markdownDescription": "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used if `FlexMatchMode` is set to `STANDALONE` .", "title": "AdditionalPlayerCount", "type": "number" }, @@ -130083,7 +130029,7 @@ "additionalProperties": false, "properties": { "DashboardDefinition": { - "markdownDescription": "The dashboard definition specified in a JSON literal. For detailed information, see [Creating dashboards (CLI)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-using-aws-cli.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The dashboard definition specified in a JSON literal.\n\n- For AWS IoT SiteWise Monitor (Classic), see [Create dashboards ( AWS CLI )](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-using-aws-cli.html)\n- For AWS IoT SiteWise Monitor (AI-aware), see [Create dashboards ( AWS CLI )](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-ai-dashboard-cli.html)\n\nin the *AWS IoT SiteWise User Guide*", "title": "DashboardDefinition", "type": "string" }, @@ -139207,7 +139153,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a Firehose stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose Firehose streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", + "markdownDescription": "A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream.
For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a Firehose stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", "title": "Tags", "type": "array" } @@ -140395,7 +140341,7 @@ }, "ParquetSerDe": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ParquetSerDe", - "markdownDescription": "A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see [Apache Parquet](https://docs.aws.amazon.com/https://parquet.apache.org/documentation/latest/) .", + "markdownDescription": "A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see [Apache Parquet](https://docs.aws.amazon.com/https://parquet.apache.org/docs/contribution-guidelines/) .", "title": "ParquetSerDe" } }, @@ -140415,7 +140361,7 @@ "title": "CloudWatchLoggingOptions" }, "ContentColumnName": { - "markdownDescription": "The name of the record content column", + "markdownDescription": "The name of the record content column.", "title": "ContentColumnName", "type": "string" }, "MetaDataColumnName": { - "markdownDescription": "The name of the record metadata column", + "markdownDescription": "Specify a column name in the table where the metadata information is to be loaded.
When you enable this field, you will see the following column in the Snowflake table, which differs based on the source type.\n\nFor Direct PUT as source\n\n`{ \"firehoseDeliveryStreamName\" : \"streamname\", \"IngestionTime\" : \"timestamp\" }`\n\nFor Kinesis Data Stream as source\n\n`{ \"kinesisStreamName\" : \"streamname\", \"kinesisShardId\" : \"Id\", \"kinesisPartitionKey\" : \"key\", \"kinesisSequenceNumber\" : \"1234\", \"subsequenceNumber\" : \"2334\", \"IngestionTime\" : \"timestamp\" }`", "title": "MetaDataColumnName", "type": "string" }, @@ -142442,7 +142388,7 @@ "properties": { "DestinationConfig": { "$ref": "#/definitions/AWS::Lambda::EventInvokeConfig.DestinationConfig", - "markdownDescription": "A destination for events after they have been sent to a function for processing.\n\n**Destinations** - *Function* - The Amazon Resource Name (ARN) of a Lambda function.\n- *Queue* - The ARN of a standard SQS queue.\n- *Topic* - The ARN of a standard SNS topic.\n- *Event Bus* - The ARN of an Amazon EventBridge event bus.", + "markdownDescription": "A destination for events after they have been sent to a function for processing.\n\n**Destinations** - *Function* - The Amazon Resource Name (ARN) of a Lambda function.\n- *Queue* - The ARN of a standard SQS queue.\n- *Bucket* - The ARN of an Amazon S3 bucket.\n- *Topic* - The ARN of a standard SNS topic.\n- *Event Bus* - The ARN of an Amazon EventBridge event bus.\n\n> S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.", "title": "DestinationConfig" }, "FunctionName": { @@ -142513,7 +142459,7 @@ "additionalProperties": false, "properties": { "Destination": { - "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", + "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of unsuccessful [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) , [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) , [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) , or [Amazon
MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", "title": "Destination", "type": "string" } @@ -142812,7 +142758,7 @@ "additionalProperties": false, "properties": { "Destination": { - "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", + "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of unsuccessful [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) , [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) , [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) , or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", "title": "Destination", "type": "string" } @@ -142965,7 +142911,7 @@ "title": "ImageConfig" }, "KmsKeyArn": { - "markdownDescription": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) . When [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry ( Amazon ECR ).
If you don't provide a customer managed key, Lambda uses a default service key.", + "markdownDescription": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt the following resources:\n\n- The function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) .\n- The function's [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) snapshots.\n- When used with `SourceKMSKeyArn` , the unzipped version of the .zip deployment package that's used for function invocations. For more information, see [Specifying a customer managed key for Lambda](https://docs.aws.amazon.com/lambda/latest/dg/encrypt-zip-package.html#enable-zip-custom-encryption) .\n- The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see [Function lifecycle](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html#images-lifecycle) .\n\nIf you don't provide a customer managed key, Lambda uses an [AWS owned key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) or an [AWS managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) .", "title": "KmsKeyArn", "type": "string" }, @@ -164229,12 +164175,12 @@ "items": { "$ref": "#/definitions/AWS::MediaStore::Container.CorsRule" }, - "markdownDescription": "Sets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.\n\nTo enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.\n\nTo learn more about CORS, see [Cross-Origin Resource Sharing (CORS) in AWS Elemental MediaStore](https://docs.aws.amazon.com/mediastore/latest/ug/cors-policy.html) .", + "markdownDescription": "> End of support notice: On November 13, 2025, AWS will discontinue support for AWS Elemental MediaStore. After November 13, 2025, you will no longer be able to access the AWS Elemental MediaStore console or AWS Elemental MediaStore resources. For more information, visit this [blog post](https://docs.aws.amazon.com/media/support-for-aws-elemental-mediastore-ending-soon/) . \n\nSets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.\n\nTo enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy.
If more than one rule applies, the service uses the first applicable rule listed.\n\nTo learn more about CORS, see [Cross-Origin Resource Sharing (CORS) in AWS Elemental MediaStore](https://docs.aws.amazon.com/mediastore/latest/ug/cors-policy.html) .", "title": "CorsPolicy", "type": "array" }, "LifecyclePolicy": { - "markdownDescription": "Writes an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy. It takes up to 20 minutes for the change to take effect.\n\nFor information about how to construct an object lifecycle policy, see [Components of an Object Lifecycle Policy](https://docs.aws.amazon.com/mediastore/latest/ug/policies-object-lifecycle-components.html) .", + "markdownDescription": "> End of support notice: On November 13, 2025, AWS will discontinue support for AWS Elemental MediaStore. After November 13, 2025, you will no longer be able to access the AWS Elemental MediaStore console or AWS Elemental MediaStore resources. For more information, visit this [blog post](https://docs.aws.amazon.com/media/support-for-aws-elemental-mediastore-ending-soon/) . \n\nWrites an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy. It takes up to 20 minutes for the change to take effect.\n\nFor information about how to construct an object lifecycle policy, see [Components of an Object Lifecycle Policy](https://docs.aws.amazon.com/mediastore/latest/ug/policies-object-lifecycle-components.html) .", "title": "LifecyclePolicy", "type": "string" }, @@ -224946,7 +224892,7 @@ "type": "number" }, "MinCapacity": { - "markdownDescription": "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. The smallest value that you can use is 0.5.", + "markdownDescription": "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. For Aurora versions that support the Aurora Serverless v2 auto-pause feature, the smallest value that you can use is 0. For versions that don't support Aurora Serverless v2 auto-pause, the smallest value that you can use is 0.5.", "title": "MinCapacity", "type": "number" }
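For the Amazon ECS `HealthCheck` hunk above (`Retries`, `Timeout`): a minimal sketch of how these settings sit in an `AWS::ECS::TaskDefinition`, in CloudFormation YAML. The task family, container name, image, and check command are illustrative, not taken from this patch.

```yaml
Resources:
  WebTaskDefinition:
    Type: AWS::ECS::TaskDefinition
    Properties:
      Family: web-app                      # hypothetical task family
      ContainerDefinitions:
        - Name: web
          Image: public.ecr.aws/nginx/nginx:latest
          Memory: 512
          HealthCheck:
            Command: ["CMD-SHELL", "curl -f http://localhost/ || exit 1"]
            Interval: 30                   # seconds between checks
            Timeout: 5                     # wait this long before counting the check as failed
            Retries: 3                     # failed checks before the container is flagged as unhealthy
            StartPeriod: 10                # grace period; failures here don't count toward Retries
```

Per the updated description, the first run of the command still does not count as a retry.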
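For the `AWS::GameLift::Fleet` hunks above (`ComputeType`, `EC2InboundPermissions`, `EC2InstanceType`, `InstanceRoleARN`): a sketch of a managed EC2 fleet. The build ID, CIDR, ports, and role ARN are placeholders, and `RuntimeConfiguration` (required for build fleets) is sketched separately after this one.

```yaml
Resources:
  GameFleet:
    Type: AWS::GameLift::Fleet
    Properties:
      Name: sample-ec2-fleet
      ComputeType: EC2                     # default; ANYWHERE targets compute you provide and manage
      BuildId: build-1111aaaa-22bb-33cc-44dd-5555eeee66ff      # placeholder build
      EC2InstanceType: c5.large
      EC2InboundPermissions:               # open game ports only for as long as you need them
        - FromPort: 33430
          ToPort: 33440                    # end-inclusive; must be >= FromPort
          IpRange: 203.0.113.0/24          # placeholder CIDR
          Protocol: UDP
      InstanceRoleARN: arn:aws:iam::111122223333:role/GameLiftInstanceRole   # placeholder role
      InstanceRoleCredentialsProvider: SHARED_CREDENTIAL_FILE  # server SDK 5.x only
```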
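For the `RuntimeConfiguration` hunks above (`MaxConcurrentGameSessionActivations`, `ConcurrentExecutions`): a fragment that would slot under the fleet's `Properties` in the previous sketch. The launch path and parameters are placeholders.

```yaml
      RuntimeConfiguration:
        GameSessionActivationTimeoutSeconds: 300
        MaxConcurrentGameSessionActivations: 2    # sessions allowed in ACTIVATING per instance or compute
        ServerProcesses:
          - LaunchPath: /local/game/myserver      # placeholder server binary
            Parameters: -port 33430               # placeholder arguments
            ConcurrentExecutions: 2               # copies of this process per instance or compute
```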
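For the `AWS::GameLift::GameSessionQueue` hunks above (`PlayerLatencyPolicies`, `TimeoutInSeconds`): a sketch of the sliding latency cap. The queue name, fleet ARN, and latency numbers are illustrative.

```yaml
Resources:
  SessionQueue:
    Type: AWS::GameLift::GameSessionQueue
    Properties:
      Name: sample-session-queue
      TimeoutInSeconds: 600                # placement becomes TIMED_OUT after this
      PlayerLatencyPolicies:               # evaluated starting with the lowest maximum latency value
        - MaximumIndividualPlayerLatencyMilliseconds: 80
          PolicyDurationSeconds: 30        # strict cap for the first 30 seconds
        - MaximumIndividualPlayerLatencyMilliseconds: 150   # relaxed cap for the rest of the placement
      Destinations:
        - DestinationArn: arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-1234abcd   # placeholder
```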
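For the FlexMatch `AdditionalPlayerCount` hunk above: a sketch of an `AWS::GameLift::MatchmakingConfiguration` that reserves two slots for late joiners. The rule set name and queue ARN are placeholders.

```yaml
Resources:
  Matchmaker:
    Type: AWS::GameLift::MatchmakingConfiguration
    Properties:
      Name: sample-matchmaker
      AcceptanceRequired: false
      RequestTimeoutSeconds: 120
      RuleSetName: sample-rule-set                  # placeholder rule set
      FlexMatchMode: WITH_QUEUE                     # AdditionalPlayerCount is ignored in STANDALONE
      GameSessionQueueArns:
        - arn:aws:gamelift:us-west-2:111122223333:gamesessionqueue/sample-session-queue  # placeholder
      AdditionalPlayerCount: 2                      # keep 2 matched slots open for future players
```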
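For the Firehose `Tags` hunk above: a sketch pairing tags on the stream with the `firehose:TagDeliveryStream` permission that the description says is checked at create time. The bucket, role, and account values are placeholders.

```yaml
Resources:
  TaggedStream:
    Type: AWS::KinesisFirehose::DeliveryStream
    Properties:
      DeliveryStreamName: sample-stream
      DeliveryStreamType: DirectPut
      S3DestinationConfiguration:                   # minimal destination; values are placeholders
        BucketARN: arn:aws:s3:::sample-destination-bucket
        RoleARN: arn:aws:iam::111122223333:role/FirehoseS3Role
      Tags:                                         # up to 50 tags per stream
        - Key: CostCenter
          Value: analytics
  StreamTaggingPolicy:                              # grants the extra action checked at create time
    Type: AWS::IAM::ManagedPolicy
    Properties:
      PolicyDocument:
        Version: "2012-10-17"
        Statement:
          - Effect: Allow
            Action: firehose:TagDeliveryStream
            Resource: arn:aws:firehose:us-east-1:111122223333:deliverystream/*
```

Without the second resource (or an equivalent policy on the caller), creating the tagged stream fails with the `AccessDeniedException` quoted in the description.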
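For the Snowflake destination hunks above (`ContentColumnName`, `MetaDataColumnName`): a fragment assuming the surrounding `SnowflakeDestinationConfiguration` (account URL, credentials, S3 backup) is filled in elsewhere; the column names are placeholders.

```yaml
      SnowflakeDestinationConfiguration:
        # ...account URL, credential, and S3 backup settings omitted from this sketch...
        DataLoadingOption: VARIANT_CONTENT_AND_METADATA_MAPPING
        ContentColumnName: RECORD_CONTENT      # column that receives each record body
        MetaDataColumnName: RECORD_METADATA    # column that receives the metadata object shown above
```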
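For the `AWS::Lambda::EventInvokeConfig` hunks above: a sketch routing failed asynchronous invocations to an S3 bucket, the destination type the updated note reserves for on-failure records only. The function name and bucket are placeholders.

```yaml
Resources:
  AsyncConfig:
    Type: AWS::Lambda::EventInvokeConfig
    Properties:
      FunctionName: sample-function        # placeholder function
      Qualifier: $LATEST
      MaximumRetryAttempts: 1
      DestinationConfig:
        OnFailure:
          Destination: arn:aws:s3:::sample-failed-events-bucket   # S3 is valid for on-failure only
        # OnSuccess could target an SQS queue, SNS topic, Lambda function, or event bus instead
```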
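For the Lambda `KmsKeyArn` hunk above: a sketch of a function whose environment variables (and, per the updated description, SnapStart snapshots and deployment artifacts) are encrypted with a customer managed key. The role and key ARNs are placeholders.

```yaml
Resources:
  EncryptedFunction:
    Type: AWS::Lambda::Function
    Properties:
      Runtime: python3.12
      Handler: index.handler
      Role: arn:aws:iam::111122223333:role/lambda-exec-role    # placeholder execution role
      KmsKeyArn: arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab  # placeholder CMK
      Environment:
        Variables:
          DB_HOST: db.internal.example.com   # encrypted at rest with the key above
      Code:
        ZipFile: |
          def handler(event, context):
              return "ok"
```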
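For the MediaStore `CorsPolicy` hunk above: a sketch of a container with a single CORS rule. Given the end-of-support notice added in this patch, this is mainly relevant to existing deployments; the container name and origin are placeholders.

```yaml
Resources:
  MediaContainer:
    Type: AWS::MediaStore::Container
    Properties:
      ContainerName: sample-media-container
      CorsPolicy:                              # the first applicable rule wins
        - AllowedOrigins: ["http://www.example.com"]
          AllowedMethods: [GET, HEAD]
          AllowedHeaders: ["*"]
          MaxAgeSeconds: 3000
```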
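For the Aurora Serverless v2 `MinCapacity` hunk above: a sketch of a cluster that can scale to zero ACUs, assuming an engine version that supports auto-pause (on versions that don't, 0.5 remains the floor, as the description notes). The engine version and credential handling are placeholders.

```yaml
Resources:
  ServerlessCluster:
    Type: AWS::RDS::DBCluster
    Properties:
      Engine: aurora-postgresql
      EngineVersion: "16.3"                # placeholder; must support auto-pause for MinCapacity 0
      ServerlessV2ScalingConfiguration:
        MinCapacity: 0                     # pauses when idle on supported versions; otherwise use 0.5
        MaxCapacity: 8                     # ACUs, in half-step increments
      MasterUsername: clusteradmin         # placeholder
      ManageMasterUserPassword: true       # let RDS keep the password in Secrets Manager
  ServerlessInstance:
    Type: AWS::RDS::DBInstance
    Properties:
      DBClusterIdentifier: !Ref ServerlessCluster
      DBInstanceClass: db.serverless       # required instance class for Serverless v2
      Engine: aurora-postgresql
```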