diff --git a/aws/sdk/aws-models/config.json b/aws/sdk/aws-models/config.json index 6b4b82802ddd04d9ce0d7bfd98d03463a5513c6d..f96f7b6f754c3a80d716ecc4f99834b0e370cec0 100644 --- a/aws/sdk/aws-models/config.json +++ b/aws/sdk/aws-models/config.json @@ -1776,24 +1776,24 @@ "name": { "target": "com.amazonaws.configservice#RecorderName", "traits": { - "smithy.api#documentation": "
The name of the recorder. By default, Config automatically\n\t\t\tassigns the name \"default\" when creating the configuration recorder.\n\t\t\tYou cannot change the assigned name.
" + "smithy.api#documentation": "The name of the configuration recorder. Config automatically assigns the name of \"default\" when creating the configuration recorder.
\nYou cannot change the name of the configuration recorder after it has been created. To change the configuration recorder name, you must delete it and create a new configuration recorder with a new name.
" } }, "roleARN": { "target": "com.amazonaws.configservice#String", "traits": { - "smithy.api#documentation": "Amazon Resource Name (ARN) of the IAM role used to describe the\n\t\t\tAmazon Web Services resources associated with the account.
\nWhile the API model does not require this field, the server will reject a request without a defined roleARN for the configuration recorder.
\nAmazon Resource Name (ARN) of the IAM role assumed by Config and used by the configuration recorder.
\nWhile the API model does not require this field, the server will reject a request without a defined roleARN
for the configuration recorder.
\n Pre-existing Config role\n
\nIf you have used an Amazon Web Services service that uses Config, such as Security Hub or\n\t\t\t\tControl Tower, and an Config role has already been created, make sure that the\n\t\t\t\tIAM role that you use when setting up Config keeps the same minimum\n\t\t\t\tpermissions as the already created Config role. You must do this so that the\n\t\t\t\tother Amazon Web Services service continues to run as expected.
\nFor example, if Control Tower has an IAM role that allows Config to read\n\t\t\t\tAmazon Simple Storage Service (Amazon S3) objects, make sure that the same permissions are granted\n\t\t\t\twithin the IAM role you use when setting up Config. Otherwise, it may\n\t\t\t\tinterfere with how Control Tower operates. For more information about IAM\n\t\t\t\troles for Config,\n\t\t\t\tsee \n Identity and Access Management for Config\n in the Config Developer Guide.\n\t\t\t
\nSpecifies the types of Amazon Web Services resources for which Config\n\t\t\trecords configuration changes.
" + "smithy.api#documentation": "Specifies which resource types Config\n\t\t\trecords for configuration changes.
\n\n High Number of Config Evaluations\n
\nYou may notice increased activity in your account during your initial month recording with Config when compared to subsequent months. During the\n\t\t\t\tinitial bootstrapping process, Config runs evaluations on all the resources in your account that you have selected\n\t\t\t\tfor Config to record.
\nIf you are running ephemeral workloads, you may see increased activity from Config as it records configuration changes associated with creating and deleting these\n\t\t\t\ttemporary resources. An ephemeral workload is a temporary use of computing resources that are loaded\n\t\t\t\tand run when needed. Examples include Amazon Elastic Compute Cloud (Amazon EC2)\n\t\t\t\tSpot Instances, Amazon EMR jobs, and Auto Scaling. If you want\n\t\t\t\tto avoid the increased activity from running ephemeral workloads, you can run these\n\t\t\t\ttypes of workloads in a separate account with Config turned off to avoid\n\t\t\t\tincreased configuration recording and rule evaluations.
\nAn object that represents the recording of configuration\n\t\t\tchanges of an Amazon Web Services resource.
" + "smithy.api#documentation": "Records configuration changes to specified resource types.\n\t\t\tFor more information about the configuration recorder,\n\t\t\tsee \n Managing the Configuration Recorder\n in the Config Developer Guide.
" } }, "com.amazonaws.configservice#ConfigurationRecorderList": { @@ -3825,7 +3825,7 @@ } }, "traits": { - "smithy.api#documentation": "Returns a filtered list of Detective or Proactive Config rules. By default, if the filter is not defined, this API returns an unfiltered list. For more information on Detective or Proactive Config rules,\n\t\t\tsee \n Evaluation Mode\n in the Config Developer Guide.
" + "smithy.api#documentation": "Returns a filtered list of Detective or Proactive Config rules. By default, if the filter is not defined, this API returns an unfiltered list. For more information on Detective or Proactive Config rules,\n\t\t\tsee \n Evaluation Mode\n in the Config Developer Guide.
" } }, "com.amazonaws.configservice#DescribeConfigRulesRequest": { @@ -3846,7 +3846,7 @@ "Filters": { "target": "com.amazonaws.configservice#DescribeConfigRulesFilters", "traits": { - "smithy.api#documentation": "Returns a list of Detective or Proactive Config rules. By default, this API returns an unfiltered list. For more information on Detective or Proactive Config rules,\n\t\t\tsee \n Evaluation Mode\n in the Config Developer Guide.
" + "smithy.api#documentation": "Returns a list of Detective or Proactive Config rules. By default, this API returns an unfiltered list. For more information on Detective or Proactive Config rules,\n\t\t\tsee \n Evaluation Mode\n in the Config Developer Guide.
" } } }, @@ -4055,7 +4055,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the current status of the specified configuration\n\t\t\trecorder as well as the status of the last recording event for the recorder. If a configuration recorder is not specified, this action\n\t\t\treturns the status of all configuration recorders associated with\n\t\t\tthe account.
\nCurrently, you can specify only one configuration recorder\n\t\t\t\tper region in your account. For a detailed status of recording events over time, add your Config events to Amazon CloudWatch metrics and use CloudWatch metrics.
\nReturns the current status of the specified configuration\n\t\t\trecorder as well as the status of the last recording event for the recorder. If a configuration recorder is not specified, this action\n\t\t\treturns the status of all configuration recorders associated with\n\t\t\tthe account.
\n>You can specify only one configuration recorder for each Amazon Web Services Region for each account.\n\t\t\t\tFor a detailed status of recording events over time, add your Config events to Amazon CloudWatch metrics and use CloudWatch metrics.
\nReturns the details for the specified configuration recorders.\n\t\t\tIf the configuration recorder is not specified, this action returns\n\t\t\tthe details for all configuration recorders associated with the\n\t\t\taccount.
\nCurrently, you can specify only one configuration recorder\n\t\t\t\tper region in your account.
\nReturns the details for the specified configuration recorders.\n\t\t\tIf the configuration recorder is not specified, this action returns\n\t\t\tthe details for all configuration recorders associated with the\n\t\t\taccount.
\nYou can specify only one configuration recorder for each Amazon Web Services Region for each account.
\nA comma-separated list of resource types to exclude from recording by the configuration\n\t\t\trecorder.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies whether the configuration recorder excludes resource types from being recorded.\n\t\t\tUse the resourceTypes
field to enter a comma-separated list of resource types to exclude as exemptions.
You have provided a configuration recorder name that is not\n\t\t\tvalid.
", + "smithy.api#documentation": "You have provided a name for the configuration recorder that is not\n\t\t\tvalid.
", "smithy.api#error": "client" } }, @@ -7298,7 +7312,7 @@ } }, "traits": { - "smithy.api#documentation": "Config throws an exception if the recording group does not contain a valid list of resource types. Values that are not valid might also be incorrectly formatted.
", + "smithy.api#documentation": "Indicates one of the following errors:
\nYou have provided a combination of parameter values that is not valid. For example:
\nSetting the allSupported
field of RecordingGroup to true
,\n\t\t\t\t\t\tbut providing a non-empty list for the resourceTypes
field of RecordingGroup.
Setting the allSupported
field of RecordingGroup to true
, but also setting the useOnly
field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES
.
Every parameter is either null, false, or empty.
\nYou have reached the limit of the number of resource types you can provide for the recording group.
\nYou have provided resource types or a recording strategy that are not valid.
\nYou have provided a null or empty role ARN.
", + "smithy.api#documentation": "You have provided a null or empty Amazon Resource Name (ARN) for the IAM role assumed by Config and used by the configuration recorder.
", "smithy.api#error": "client" } }, @@ -7999,7 +8013,7 @@ } }, "traits": { - "smithy.api#documentation": "You have reached the limit of the number of recorders you can\n\t\t\tcreate.
", + "smithy.api#documentation": "You have reached the limit of the number of configuration recorders you can\n\t\t\tcreate.
", "smithy.api#error": "client" } }, @@ -8014,7 +8028,7 @@ } }, "traits": { - "smithy.api#documentation": "You have reached the limit of the number of conformance packs you can create in an account. For more information, see \n Service Limits\n in the Config Developer Guide.
", + "smithy.api#documentation": "You have reached the limit of the number of conformance packs you can create in an account. For more information, see \n Service Limits\n in the Config Developer Guide.
", "smithy.api#error": "client" } }, @@ -8044,7 +8058,7 @@ } }, "traits": { - "smithy.api#documentation": "You have reached the limit of the number of organization Config rules you can create. For more information, see see \n Service Limits\n in the Config Developer Guide.
", + "smithy.api#documentation": "You have reached the limit of the number of organization Config rules you can create. For more information, see see \n Service Limits\n in the Config Developer Guide.
", "smithy.api#error": "client" } }, @@ -8059,7 +8073,7 @@ } }, "traits": { - "smithy.api#documentation": "You have reached the limit of the number of organization conformance packs you can create in an account. For more information, see \n Service Limits\n in the Config Developer Guide.
", + "smithy.api#documentation": "You have reached the limit of the number of organization conformance packs you can create in an account. For more information, see \n Service Limits\n in the Config Developer Guide.
", "smithy.api#error": "client" } }, @@ -9075,7 +9089,7 @@ } }, "traits": { - "smithy.api#documentation": "An object that specifies metadata for your organization Config Custom Policy rule including the runtime system in use, which accounts have debug logging enabled, and\n\t\t\tother custom rule metadata such as resource type, resource ID of Amazon Web Services\n\t\t\tresource, and organization trigger types that trigger Config to evaluate\n\t\t\t\tAmazon Web Services resources against a rule.
" + "smithy.api#documentation": "metadata for your organization Config Custom Policy rule including the runtime system in use, which accounts have debug logging enabled, and\n\t\t\tother custom rule metadata such as resource type, resource ID of Amazon Web Services\n\t\t\tresource, and organization trigger types that trigger Config to evaluate\n\t\t\t\tAmazon Web Services resources against a rule.
" } }, "com.amazonaws.configservice#OrganizationCustomRuleMetadata": { @@ -9139,7 +9153,7 @@ } }, "traits": { - "smithy.api#documentation": "An object that specifies organization custom rule metadata such as resource type, resource ID of Amazon Web Services resource, Lambda function ARN, \n\t\t\tand organization trigger types that trigger Config to evaluate your Amazon Web Services resources against a rule. \n\t\t\tIt also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.
" + "smithy.api#documentation": "organization custom rule metadata such as resource type, resource ID of Amazon Web Services resource, Lambda function ARN, \n\t\t\tand organization trigger types that trigger Config to evaluate your Amazon Web Services resources against a rule. \n\t\t\tIt also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.
" } }, "com.amazonaws.configservice#OrganizationManagedRuleMetadata": { @@ -9196,7 +9210,7 @@ } }, "traits": { - "smithy.api#documentation": "An object that specifies organization managed rule metadata such as resource type and ID of Amazon Web Services resource along with the rule identifier. \n\t\t\tIt also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.
" + "smithy.api#documentation": "organization managed rule metadata such as resource type and ID of Amazon Web Services resource along with the rule identifier. \n\t\t\tIt also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.
" } }, "com.amazonaws.configservice#OrganizationResourceDetailedStatus": { @@ -9727,7 +9741,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new configuration recorder to record the selected\n\t\t\tresource configurations.
\nYou can use this action to change the role roleARN
\n\t\t\tor the recordingGroup
of an existing recorder. To\n\t\t\tchange the role, call the action on the existing configuration\n\t\t\trecorder and specify a role.
Currently, you can specify only one configuration recorder\n\t\t\t\tper region in your account.
\nIf ConfigurationRecorder
does not have the\n\t\t\t\t\trecordingGroup parameter\n\t\t\t\tspecified, the default is to record all supported resource\n\t\t\t\ttypes.
Creates a new configuration recorder to record configuration changes for specified resource types.
\nYou can also use this action to change the roleARN
\n\t\t\tor the recordingGroup
of an existing recorder.\n\t\t\tFor more information, see \n Managing the Configuration Recorder\n in the Config Developer Guide.
You can specify only one configuration recorder for each Amazon Web Services Region for each account.
\nIf the configuration recorder does not have the\n\t\t\t\t\trecordingGroup
field\n\t\t\t\tspecified, the default is to record all supported resource\n\t\t\t\ttypes.
The configuration recorder object that records each\n\t\t\tconfiguration change made to the resources.
", + "smithy.api#documentation": "An object for the configuration recorder to record configuration changes for specified resource types.
", "smithy.api#required": {} } } @@ -9772,7 +9786,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates or updates a conformance pack. A conformance pack is a collection of Config rules that can be easily deployed in an account and a region and across an organization.\n\t\t\tFor information on how many conformance packs you can have per account, \n\t\t\tsee \n Service Limits\n in the Config Developer Guide.
\nThis API creates a service-linked role AWSServiceRoleForConfigConforms
in your account. \n\t\tThe service-linked role is created only when the role does not exist in your account.
You must specify only one of the follow parameters: TemplateS3Uri
, TemplateBody
or TemplateSSMDocumentDetails
.
Creates or updates a conformance pack. A conformance pack is a collection of Config rules that can be easily deployed in an account and a region and across an organization.\n\t\t\tFor information on how many conformance packs you can have per account, \n\t\t\tsee \n Service Limits\n in the Config Developer Guide.
\nThis API creates a service-linked role AWSServiceRoleForConfigConforms
in your account. \n\t\tThe service-linked role is created only when the role does not exist in your account.
You must specify only one of the follow parameters: TemplateS3Uri
, TemplateBody
or TemplateSSMDocumentDetails
.
Deploys conformance packs across member accounts in an Amazon Web Services Organization. For information on how many organization conformance packs and how many Config rules you can have per account, \n\t\t\tsee \n Service Limits\n in the Config Developer Guide.
\nOnly a management account and a delegated administrator can call this API. \n\t\t\tWhen calling this API with a delegated administrator, you must ensure Organizations \n\t\t\tListDelegatedAdministrator
permissions are added. An organization can have up to 3 delegated administrators.
This API enables organization service access for config-multiaccountsetup.amazonaws.com
\n\t\t\tthrough the EnableAWSServiceAccess
action and creates a \n\t\t\tservice-linked role AWSServiceRoleForConfigMultiAccountSetup
in the management or delegated administrator account of your organization. \n\t\t\tThe service-linked role is created only when the role does not exist in the caller account. \n\t\t\tTo use this API with delegated administrator, register a delegated administrator by calling Amazon Web Services Organization \n\t\t\tregister-delegate-admin
for config-multiaccountsetup.amazonaws.com
.
Prerequisite: Ensure you call EnableAllFeatures
API to enable all features in an organization.
You must specify either the TemplateS3Uri
or the TemplateBody
parameter, but not both. \n\t\t\tIf you provide both Config uses the TemplateS3Uri
parameter and ignores the TemplateBody
parameter.
Config sets the state of a conformance pack to CREATE_IN_PROGRESS and UPDATE_IN_PROGRESS until the conformance pack is created or updated. \n\t\t\t\tYou cannot update a conformance pack while it is in this state.
\nDeploys conformance packs across member accounts in an Amazon Web Services Organization. For information on how many organization conformance packs and how many Config rules you can have per account, \n\t\t\tsee \n Service Limits\n in the Config Developer Guide.
\nOnly a management account and a delegated administrator can call this API. \n\t\t\tWhen calling this API with a delegated administrator, you must ensure Organizations \n\t\t\tListDelegatedAdministrator
permissions are added. An organization can have up to 3 delegated administrators.
This API enables organization service access for config-multiaccountsetup.amazonaws.com
\n\t\t\tthrough the EnableAWSServiceAccess
action and creates a \n\t\t\tservice-linked role AWSServiceRoleForConfigMultiAccountSetup
in the management or delegated administrator account of your organization. \n\t\t\tThe service-linked role is created only when the role does not exist in the caller account. \n\t\t\tTo use this API with delegated administrator, register a delegated administrator by calling Amazon Web Services Organization \n\t\t\tregister-delegate-admin
for config-multiaccountsetup.amazonaws.com
.
Prerequisite: Ensure you call EnableAllFeatures
API to enable all features in an organization.
You must specify either the TemplateS3Uri
or the TemplateBody
parameter, but not both. \n\t\t\tIf you provide both Config uses the TemplateS3Uri
parameter and ignores the TemplateBody
parameter.
Config sets the state of a conformance pack to CREATE_IN_PROGRESS and UPDATE_IN_PROGRESS until the conformance pack is created or updated. \n\t\t\t\tYou cannot update a conformance pack while it is in this state.
\nA remediation exception is when a specified resource is no longer considered for auto-remediation. \n\t\t\tThis API adds a new exception or updates an existing exception for a specified resource with a specified Config rule.
\nConfig generates a remediation exception when a problem occurs running a remediation action for a specified resource. \n\t\t\tRemediation exceptions blocks auto-remediation until the exception is cleared.
\nWhen placing an exception on an Amazon Web Services resource, it is recommended that remediation is set as manual remediation until\n\t\t\tthe given Config rule for the specified resource evaluates the resource as NON_COMPLIANT
.\n\t\t\tOnce the resource has been evaluated as NON_COMPLIANT
, you can add remediation exceptions and change the remediation type back from Manual to Auto if you want to use auto-remediation.\n\t\t\tOtherwise, using auto-remediation before a NON_COMPLIANT
evaluation result can delete resources before the exception is applied.
Placing an exception can only be performed on resources that are NON_COMPLIANT
.\n\t\t\tIf you use this API for COMPLIANT
resources or resources that are NOT_APPLICABLE
, a remediation exception will not be generated.\n\t\t\tFor more information on the conditions that initiate the possible Config evaluation results,\n\t\t\tsee Concepts | Config Rules in the Config Developer Guide.
A remediation exception is when a specified resource is no longer considered for auto-remediation. \n\t\t\tThis API adds a new exception or updates an existing exception for a specified resource with a specified Config rule.
\nConfig generates a remediation exception when a problem occurs running a remediation action for a specified resource. \n\t\t\tRemediation exceptions blocks auto-remediation until the exception is cleared.
\nWhen placing an exception on an Amazon Web Services resource, it is recommended that remediation is set as manual remediation until\n\t\t\tthe given Config rule for the specified resource evaluates the resource as NON_COMPLIANT
.\n\t\t\tOnce the resource has been evaluated as NON_COMPLIANT
, you can add remediation exceptions and change the remediation type back from Manual to Auto if you want to use auto-remediation.\n\t\t\tOtherwise, using auto-remediation before a NON_COMPLIANT
evaluation result can delete resources before the exception is applied.
Placing an exception can only be performed on resources that are NON_COMPLIANT
.\n\t\t\tIf you use this API for COMPLIANT
resources or resources that are NOT_APPLICABLE
, a remediation exception will not be generated.\n\t\t\tFor more information on the conditions that initiate the possible Config evaluation results,\n\t\t\tsee Concepts | Config Rules in the Config Developer Guide.
Specifies whether Config records configuration changes for\n\t\t\tevery supported type of regional resource.
\nIf you set this option to true
, when Config\n\t\t\tadds support for a new type of regional resource, it starts\n\t\t\trecording resources of that type automatically.
If you set this option to true
, you cannot\n\t\t\tenumerate a list of resourceTypes
.
Specifies whether Config records configuration changes for all supported regional resource types.
\nIf you set this field to true
, when Config\n\t\t\tadds support for a new type of regional resource, Config starts recording resources of that type automatically.
If you set this field to true
,\n\t\t\tyou cannot enumerate specific resource types to record in the resourceTypes
field of RecordingGroup, or to exclude in the resourceTypes
field of ExclusionByResourceTypes.
Specifies whether Config includes all supported types of\n\t\t\tglobal resources (for example, IAM resources) with the resources\n\t\t\tthat it records.
\nBefore you can set this option to true
, you must\n\t\t\tset the allSupported
option to\n\t\t\ttrue
.
If you set this option to true
, when Config\n\t\t\tadds support for a new type of global resource, it starts recording\n\t\t\tresources of that type automatically.
The configuration details for any global resource are the same\n\t\t\tin all regions. To prevent duplicate configuration items, you should\n\t\t\tconsider customizing Config in only one region to record global\n\t\t\tresources.
" + "smithy.api#documentation": "Specifies whether Config records configuration changes for all supported global resources.
\nBefore you set this field to true
,\n\t\t\tset the allSupported
field of RecordingGroup to\n\t\t\ttrue
. Optionally, you can set the useOnly
field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES
.
If you set this field to true
, when Config\n\t\t\tadds support for a new type of global resource in the Region where you set up the configuration recorder, Config starts recording\n\t\t\tresources of that type automatically.
If you set this field to false
but list global resource types in the resourceTypes
field of RecordingGroup,\n\t\t\tConfig will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes
field to false.
If you do not want to record configuration changes to global resource types, make sure to not list them in the resourceTypes
field\n\t\t\tin addition to setting the includeGlobalResourceTypes
field to false.
A comma-separated list that specifies the types of Amazon Web Services\n\t\t\tresources for which Config records configuration changes (for\n\t\t\texample, AWS::EC2::Instance
or\n\t\t\t\tAWS::CloudTrail::Trail
).
To record all configuration changes, you must\n\t\t\tset the allSupported
option to\n\t\t\ttrue
.
If you set the AllSupported
option to false and populate the ResourceTypes
option with values,\n\t\t\twhen Config adds support for a new type of resource,\n\t\t\tit will not record resources of that type unless you manually add that type to your recording group.
For a list of valid resourceTypes
values, see the\n\t\t\t\tresourceType Value column in\n\t\t\t\tSupported Amazon Web Services resource Types.
A comma-separated list that specifies which resource types Config\n\t\t\trecords.
\nOptionally, you can set the useOnly
field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES
.
To record all configuration changes,\n\t\t\t\tset the allSupported
field of RecordingGroup to\n\t\t\t\ttrue
, and either omit this field or don't specify any resource types in this field. If you set the allSupported
field to false
and specify values for resourceTypes
,\n\t\t\t\t\twhen Config adds support for a new type of resource,\n\t\t\t\t\tit will not record resources of that type unless you manually add that type to your recording group.
For a list of valid resourceTypes
values, see the\n\t\t\t\tResource Type Value column in\n\t\t\t\tSupported Amazon Web Services resource Types in the Config developer guide.
\n Region Availability\n
\nBefore specifying a resource type for Config to track,\n\t\t\t\tcheck Resource Coverage by Region Availability\n\t\t\t\tto see if the resource type is supported in the Amazon Web Services Region where you set up Config.\n\t\t\t\tIf a resource type is supported by Config in at least one Region,\n\t\t\t\tyou can enable the recording of that resource type in all Regions supported by Config,\n\t\t\t\teven if the specified resource type is not supported in the Amazon Web Services Region where you set up Config.
\nAn object that specifies how Config excludes resource types from being recorded by the configuration recorder.
\nTo use this option, you must set the useOnly
field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES
.
An object that specifies the recording strategy for the configuration recorder.
\nIf you set the useOnly
field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES
, Config records configuration changes for all supported regional resource types. You also must set the allSupported
field of RecordingGroup to true
. When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type.
If you set the useOnly
field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES
, Config records configuration changes for only the resource types you specify in the resourceTypes
field of RecordingGroup.
If you set the useOnly
field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES
, Config records configuration changes for all supported resource types\n\t\t\t\texcept the resource types that you specify as exemptions to exclude from being recorded in the resourceTypes
field of ExclusionByResourceTypes.
The recordingStrategy
field is optional when you set the\n\t\t\tallSupported
field of RecordingGroup to true
.
The recordingStrategy
field is optional when you list resource types in the\n\t\t\t\tresourceTypes
field of RecordingGroup.
The recordingStrategy
field is required if you list resource types to exclude from recording in the resourceTypes
field of ExclusionByResourceTypes.
If you choose EXCLUSION_BY_RESOURCE_TYPES
for the recording strategy, the exclusionByResourceTypes
field will override other properties in the request.
For example, even if you set includeGlobalResourceTypes
to false, global resource types will still be automatically\n\t\t\trecorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes
field of exclusionByResourceTypes
.
By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES
recording strategy,\n\t\t\t\twhen Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types,\n\t\t\t\tConfig starts recording resources of that type automatically.
Specifies which resource types Config\n\t\t\trecords for configuration changes.\n\t\t\tIn the recording group, you specify whether you want to record all supported resource types or to include or exclude specific types of resources.
\nBy default, Config records configuration changes for all supported types of\n\t\t\t\tRegional resources that Config discovers in the\n\t\t\t\tAmazon Web Services Region in which it is running. Regional resources are tied to a\n\t\t\tRegion and can be used only in that Region. Examples of Regional resources are Amazon EC2 instances and Amazon EBS volumes.
\nYou can also have Config record supported types of global resources.\n\t\t\t\tGlobal resources are not tied to a specific Region and can be used in all Regions. The global\n\t\t\t\tresource types that Config supports include IAM users, groups, roles, and customer managed\n\t\t\t\tpolicies.
\nGlobal resource types onboarded to Config recording after February 2022 will\n\t\t\t\tbe recorded only in the service's home Region for the commercial partition and\n\t\t\t\tAmazon Web Services GovCloud (US-West) for the Amazon Web Services GovCloud (US) partition. You can view the\n\t\t\t\tConfiguration Items for these new global resource types only in their home Region\n\t\t\t\tand Amazon Web Services GovCloud (US-West).
\nIf you don't want Config to record all resources, you can specify which types of resources Config records with the resourceTypes
parameter.
For a list of supported resource types, see Supported Resource Types in the Config developer guide.
\nFor more information and a table of the Home Regions for Global Resource Types Onboarded after February 2022, see Selecting Which Resources Config Records in the Config developer guide.
" + } + }, + "com.amazonaws.configservice#RecordingStrategy": { + "type": "structure", + "members": { + "useOnly": { + "target": "com.amazonaws.configservice#RecordingStrategyType", + "traits": { + "smithy.api#documentation": "The recording strategy for the configuration recorder.
\nIf you set this option to ALL_SUPPORTED_RESOURCE_TYPES
, Config records configuration changes for all supported regional resource types. You also must set the allSupported
field of RecordingGroup to true
.
When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type. For a list of supported resource types,\n\t\t\t\tsee Supported Resource Types in the Config developer guide.
\nIf you set this option to INCLUSION_BY_RESOURCE_TYPES
, Config records\n\t\t\t\t\tconfiguration changes for only the resource types that you specify in the\n\t\t\t\t\t\tresourceTypes
field of RecordingGroup.
If you set this option to EXCLUSION_BY_RESOURCE_TYPES
, Config records\n\t\t\t\t\tconfiguration changes for all supported resource types, except the resource\n\t\t\t\t\ttypes that you specify as exemptions to exclude from being recorded in the\n\t\t\t\t\t\tresourceTypes
field of ExclusionByResourceTypes.
The recordingStrategy
field is optional when you set the\n\t\t\tallSupported
field of RecordingGroup to true
.
The recordingStrategy
field is optional when you list resource types in the\n\t\t\t\tresourceTypes
field of RecordingGroup.
The recordingStrategy
field is required if you list resource types to exclude from recording in the resourceTypes
field of ExclusionByResourceTypes.
If you choose EXCLUSION_BY_RESOURCE_TYPES
for the recording strategy, the exclusionByResourceTypes
field will override other properties in the request.
For example, even if you set includeGlobalResourceTypes
to false, global resource types will still be automatically\n\t\t\trecorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes
field of exclusionByResourceTypes
.
By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES
recording strategy,\n\t\t\t\twhen Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types,\n\t\t\t\tConfig starts recording resources of that type automatically.
Specifies which Amazon Web Services resource types Config\n\t\t\trecords for configuration changes. In the recording group, you specify whether you want to record all supported resource types\n\t\t\tor only specific types of resources.
\nBy default, Config records the configuration changes for all supported types of\n\t\t\t\tregional resources that Config discovers in the region in which it is\n\t\t\t\trunning. Regional resources are tied to a region and can be used only in that region. Examples\n\t\t\t\tof regional resources are EC2 instances and EBS volumes.
\nYou can also have Config record supported types of global resources.\n\t\t\t\tGlobal resources are not tied to a specific region and can be used in all regions. The global\n\t\t\t\tresource types that Config supports include IAM users, groups, roles, and customer managed\n\t\t\t\tpolicies.
\nGlobal resource types onboarded to Config recording after February 2022 will only be\n\t\t\t\trecorded in the service's home region for the commercial partition and\n\t\t\t\tAmazon Web Services GovCloud (US) West for the GovCloud partition. You can view the Configuration Items for\n\t\t\t\tthese new global resource types only in their home region and Amazon Web Services GovCloud (US) West.
\nSupported global resource types onboarded before February 2022 such as\n\t\t\t\tAWS::IAM::Group
, AWS::IAM::Policy
, AWS::IAM::Role
,\n\t\t\t\tAWS::IAM::User
remain unchanged, and they will continue to deliver\n\t\t\t\tConfiguration Items in all supported regions in Config. The change will only affect new global\n\t\t\t\tresource types onboarded after February 2022.
To record global resource types onboarded after February 2022,\n\t\t\t\tenable All Supported Resource Types in the home region of the global resource type you want to record.
\nIf you don't want Config to record all resources, you can\n\t\t\tspecify which types of resources it will record with the\n\t\t\t\tresourceTypes
parameter.
For a list of supported resource types, see Supported Resource Types.
\nFor more information and a table of the Home Regions for Global Resource Types Onboarded after February 2022, see Selecting Which Resources Config Records.
" + "smithy.api#documentation": "Specifies the recording strategy of the configuration recorder.
" + } + }, + "com.amazonaws.configservice#RecordingStrategyType": { + "type": "enum", + "members": { + "ALL_SUPPORTED_RESOURCE_TYPES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL_SUPPORTED_RESOURCE_TYPES" + } + }, + "INCLUSION_BY_RESOURCE_TYPES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCLUSION_BY_RESOURCE_TYPES" + } + }, + "EXCLUSION_BY_RESOURCE_TYPES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXCLUSION_BY_RESOURCE_TYPES" + } + } } }, "com.amazonaws.configservice#ReevaluateConfigRuleNames": { @@ -13492,7 +13555,7 @@ } ], "traits": { - "smithy.api#documentation": "Accepts a structured query language (SQL) SELECT command and an aggregator to query configuration state of Amazon Web Services resources across multiple accounts and regions, \n\t\t\tperforms the corresponding search, and returns resource configurations matching the properties.
\nFor more information about query components, see the \n\t\t\t\n Query Components\n section in the Config Developer Guide.
\nIf you run an aggregation query (i.e., using GROUP BY
or using aggregate functions such as COUNT
; e.g., SELECT resourceId, COUNT(*) WHERE resourceType = 'AWS::IAM::Role' GROUP BY resourceId
)\n\t\t\t\tand do not specify the MaxResults
or the Limit
query parameters, the default page size is set to 500.
If you run a non-aggregation query (i.e., not using GROUP BY
or aggregate function; e.g., SELECT * WHERE resourceType = 'AWS::IAM::Role'
)\n\t\t\t\tand do not specify the MaxResults
or the Limit
query parameters, the default page size is set to 25.
Accepts a structured query language (SQL) SELECT command and an aggregator to query configuration state of Amazon Web Services resources across multiple accounts and regions, \n\t\t\tperforms the corresponding search, and returns resource configurations matching the properties.
\nFor more information about query components, see the \n\t\t\t\n Query Components\n section in the Config Developer Guide.
\nIf you run an aggregation query (i.e., using GROUP BY
or using aggregate functions such as COUNT
; e.g., SELECT resourceId, COUNT(*) WHERE resourceType = 'AWS::IAM::Role' GROUP BY resourceId
)\n\t\t\t\tand do not specify the MaxResults
or the Limit
query parameters, the default page size is set to 500.
If you run a non-aggregation query (i.e., not using GROUP BY
or aggregate function; e.g., SELECT * WHERE resourceType = 'AWS::IAM::Role'
)\n\t\t\t\tand do not specify the MaxResults
or the Limit
query parameters, the default page size is set to 25.
You have reached the limit of the number of tags you can use. \n\t\t\tFor more information, see \n Service Limits\n in the Config Developer Guide.
", + "smithy.api#documentation": "You have reached the limit of the number of tags you can use. \n\t\t\tFor more information, see \n Service Limits\n in the Config Developer Guide.
", "smithy.api#error": "client" } }, diff --git a/aws/sdk/aws-models/dynamodb.json b/aws/sdk/aws-models/dynamodb.json index adb4cf00ebc6ea886f64a3e713d5cc9f5661048c..7c5a2d767878196a48d71274e14423ef9c51dc24 100644 --- a/aws/sdk/aws-models/dynamodb.json +++ b/aws/sdk/aws-models/dynamodb.json @@ -833,7 +833,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "The BatchGetItem
operation returns the attributes of one or more items\n from one or more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100\n items. BatchGetItem
returns a partial result if the response size limit is\n exceeded, the table's provisioned throughput is exceeded, or an internal processing\n failure occurs. If a partial result is returned, the operation returns a value for\n UnprocessedKeys
. You can use this value to retry the operation starting\n with the next item to get.
If you request more than 100 items, BatchGetItem
returns a\n ValidationException
with the message \"Too many items requested for\n the BatchGetItem call.\"
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in\n size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns\n an appropriate UnprocessedKeys
value so you can get the next page of\n results. If desired, your application can include its own logic to assemble the pages of\n results into one dataset.
If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchGetItem
returns a\n ProvisionedThroughputExceededException
. If at least\n one of the items is successfully processed, then\n BatchGetItem
completes successfully, while returning the keys of the\n unread items in UnprocessedKeys
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nBy default, BatchGetItem
performs eventually consistent reads on every\n table in the request. If you want strongly consistent reads instead, you can set\n ConsistentRead
to true
for any or all tables.
In order to minimize response latency, BatchGetItem
may retrieve items in\n parallel.
When designing your application, keep in mind that DynamoDB does not return items in\n any particular order. To help parse the response by item, include the primary key values\n for the items in your request in the ProjectionExpression
parameter.
If a requested item does not exist, it is not returned in the result. Requests for\n nonexistent items consume the minimum read capacity units according to the type of read.\n For more information, see Working with Tables in the Amazon DynamoDB Developer\n Guide.
" + "smithy.api#documentation": "The BatchGetItem
operation returns the attributes of one or more items\n from one or more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100\n items. BatchGetItem
returns a partial result if the response size limit is\n exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is requested,\n or an internal processing failure occurs. If a partial result is returned, the operation returns a value for\n UnprocessedKeys
. You can use this value to retry the operation starting\n with the next item to get.
If you request more than 100 items, BatchGetItem
returns a\n ValidationException
with the message \"Too many items requested for\n the BatchGetItem call.\"
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in\n size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns\n an appropriate UnprocessedKeys
value so you can get the next page of\n results. If desired, your application can include its own logic to assemble the pages of\n results into one dataset.
If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchGetItem
returns a\n ProvisionedThroughputExceededException
. If at least\n one of the items is successfully processed, then\n BatchGetItem
completes successfully, while returning the keys of the\n unread items in UnprocessedKeys
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nBy default, BatchGetItem
performs eventually consistent reads on every\n table in the request. If you want strongly consistent reads instead, you can set\n ConsistentRead
to true
for any or all tables.
In order to minimize response latency, BatchGetItem
may retrieve items in\n parallel.
When designing your application, keep in mind that DynamoDB does not return items in\n any particular order. To help parse the response by item, include the primary key values\n for the items in your request in the ProjectionExpression
parameter.
If a requested item does not exist, it is not returned in the result. Requests for\n nonexistent items consume the minimum read capacity units according to the type of read.\n For more information, see Working with Tables in the Amazon DynamoDB Developer\n Guide.
" } }, "com.amazonaws.dynamodb#BatchGetItemInput": { @@ -6732,7 +6732,7 @@ } }, "traits": { - "smithy.api#documentation": "There is no limit to the number of daily on-demand backups that can be taken.
\nFor most purposes, up to 500 simultaneous table operations are allowed per account. These operations\n include CreateTable
, UpdateTable
,\n DeleteTable
,UpdateTimeToLive
,\n RestoreTableFromBackup
, and RestoreTableToPointInTime
.
When you are creating a table with one or more secondary\n indexes, you can have up to 250 such requests running at a time. However, if the table or\n index specifications are complex, then DynamoDB might temporarily reduce the number\n of concurrent operations.
\nWhen importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.
\nThere is a soft account quota of 2,500 tables.
", + "smithy.api#documentation": "There is no limit to the number of daily on-demand backups that can be taken.
\nFor most purposes, up to 500 simultaneous table operations are allowed per account. These operations\n include CreateTable
, UpdateTable
,\n DeleteTable
,UpdateTimeToLive
,\n RestoreTableFromBackup
, and RestoreTableToPointInTime
.
When you are creating a table with one or more secondary\n indexes, you can have up to 250 such requests running at a time. However, if the table or\n index specifications are complex, then DynamoDB might temporarily reduce the number\n of concurrent operations.
\nWhen importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.
\nThere is a soft account quota of 2,500 tables.
\nGetRecords was called with a value of more than 1000 for the limit request parameter.
\nMore than 2 processes are reading from the same streams shard at the same time. Exceeding\n this limit may result in request throttling.
", "smithy.api#error": "client" } }, @@ -7689,14 +7689,14 @@ "ReadCapacityUnits": { "target": "com.amazonaws.dynamodb#PositiveLongObject", "traits": { - "smithy.api#documentation": "The maximum number of strongly consistent reads consumed per second before DynamoDB\n returns a ThrottlingException
. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB\n Developer Guide.
If read/write capacity mode is PAY_PER_REQUEST
the value is set to\n 0.
The maximum number of strongly consistent reads consumed per second before DynamoDB\n returns a ThrottlingException
. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB\n Developer Guide.
If read/write capacity mode is PAY_PER_REQUEST
the value is set to\n 0.
The maximum number of writes consumed per second before DynamoDB returns a\n ThrottlingException
. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB\n Developer Guide.
If read/write capacity mode is PAY_PER_REQUEST
the value is set to\n 0.
The maximum number of writes consumed per second before DynamoDB returns a\n ThrottlingException
. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB\n Developer Guide.
If read/write capacity mode is PAY_PER_REQUEST
the value is set to\n 0.
The result of the exchange and whether it was successful
.
The result of the exchange and whether it was successful
.
The ID representing the allocation of the address for use with EC2-VPC.
", + "smithy.api#documentation": "The ID representing the allocation of the address.
", "smithy.api#xmlName": "allocationId" } }, @@ -1165,7 +1184,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "AssociationId", - "smithy.api#documentation": "The ID representing the association of the address with an instance in a VPC.
", + "smithy.api#documentation": "The ID representing the association of the address with an instance.
", "smithy.api#xmlName": "associationId" } }, @@ -1173,7 +1192,7 @@ "target": "com.amazonaws.ec2#DomainType", "traits": { "aws.protocols#ec2QueryName": "Domain", - "smithy.api#documentation": "Indicates whether this Elastic IP address is for use with instances\n\t\t\t\tin EC2-Classic (standard
) or instances in a VPC (vpc
).
The network (vpc
).
Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate \n it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address \n pool and can be allocated to a different Amazon Web Services account.
\nYou can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created \n from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own \n IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.
\n[EC2-VPC] If you release an Elastic IP address, you might be able to recover it. You cannot recover an \n Elastic IP address that you released after it is allocated to another Amazon Web Services account. You cannot recover an Elastic IP\n address for EC2-Classic. To attempt to recover an Elastic IP address that you released, specify it in this operation.
\nAn Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate\n 5 Elastic IP addresses for EC2-Classic per Region and 5 Elastic IP addresses for EC2-VPC per Region.
\nFor more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.
\nYou can allocate a carrier IP address which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).
\nWe are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.
\nAllocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate \n it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address \n pool and can be allocated to a different Amazon Web Services account.
\nYou can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created \n from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own \n IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.
\nIf you release an Elastic IP address, you might be able to recover it. You cannot recover\n an Elastic IP address that you released after it is allocated to another Amazon Web Services account. To attempt to recover an Elastic IP address that you released, specify\n it in this operation.
\nFor more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.
\nYou can allocate a carrier IP address which is a public IP address from a telecommunication carrier, \n to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).
" } }, "com.amazonaws.ec2#AllocateAddressRequest": { @@ -1522,13 +1544,13 @@ "Domain": { "target": "com.amazonaws.ec2#DomainType", "traits": { - "smithy.api#documentation": "Indicates whether the Elastic IP address is for use with instances in a VPC or instances in EC2-Classic.
\nDefault: If the Region supports EC2-Classic, the default is standard
. Otherwise, the default\n is vpc
.
The network (vpc
).
[EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address pool.
" + "smithy.api#documentation": "The Elastic IP address to recover or an IPv4 address from an address pool.
" } }, "PublicIpv4Pool": { @@ -1586,7 +1608,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "AllocationId", - "smithy.api#documentation": "[EC2-VPC] The ID that Amazon Web Services assigns to represent the allocation of the Elastic IP address for use with instances in a VPC.
", + "smithy.api#documentation": "The ID that represents the allocation of the Elastic IP address.
", "smithy.api#xmlName": "allocationId" } }, @@ -1610,7 +1632,7 @@ "target": "com.amazonaws.ec2#DomainType", "traits": { "aws.protocols#ec2QueryName": "Domain", - "smithy.api#documentation": "Indicates whether the Elastic IP address is for use with instances in a VPC (vpc
) or\n\t\t\t\tinstances in EC2-Classic (standard
).
The network (vpc
).
The carrier IP address. This option is only available for network interfaces which reside\n in a subnet in a Wavelength Zone (for example an EC2 instance).
", + "smithy.api#documentation": "The carrier IP address. This option is only available for network interfaces that reside\n in a subnet in a Wavelength Zone.
", "smithy.api#xmlName": "carrierIp" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AllocateHosts": { @@ -1749,7 +1774,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of AllocateHosts.
" + "smithy.api#documentation": "Contains the output of AllocateHosts.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#AllocateIpamPoolCidr": { @@ -1761,7 +1787,7 @@ "target": "com.amazonaws.ec2#AllocateIpamPoolCidrResult" }, "traits": { - "smithy.api#documentation": "Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another IPAM pool or to a resource. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide.
\nThis action creates an allocation with strong consistency. The returned CIDR will not overlap with any other allocations from the same pool.
\nAllocate a CIDR from an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.
\nIn IPAM, an allocation is a CIDR assignment from an IPAM pool to another IPAM pool or to a resource. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide.
\nThis action creates an allocation with strong consistency. The returned CIDR will not overlap with any other allocations from the same pool.
\nAssociates an Elastic IP address, or carrier IP address (for instances that are in\n subnets in Wavelength Zones) with an instance or a network interface. Before you can use an\n Elastic IP address, you must allocate it to your account.
\nAn Elastic IP address is for use in either the EC2-Classic platform or in a VPC.\n\t\t\tFor more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.
\n[EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already\n associated with a different instance, it is disassociated from that instance and associated\n with the specified instance. If you associate an Elastic IP address with an instance that has\n an existing Elastic IP address, the existing address is disassociated from the instance, but\n remains allocated to your account.
\n[VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic\n IP address is associated with the primary IP address. If the Elastic IP address is already\n associated with a different instance or a network interface, you get an error unless you allow\n reassociation. You cannot associate an Elastic IP address with an instance or network\n interface that has an existing Elastic IP address.
\n[Subnets in Wavelength Zones] You can associate an IP address from the telecommunication\n carrier to the instance or network interface.
\nYou cannot associate an Elastic IP address with an interface in a different network border group.
\nThis is an idempotent operation. If you perform the operation more than once, Amazon EC2\n doesn't return an error, and you may be charged for each time the Elastic IP address is\n remapped to the same instance. For more information, see the Elastic IP\n Addresses section of Amazon EC2\n Pricing.
\nWe are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.
\nAssociates an Elastic IP address, or carrier IP address (for instances that are in\n subnets in Wavelength Zones) with an instance or a network interface. Before you can use an\n Elastic IP address, you must allocate it to your account.
\nIf the Elastic IP address is already\n associated with a different instance, it is disassociated from that instance and associated\n with the specified instance. If you associate an Elastic IP address with an instance that has\n an existing Elastic IP address, the existing address is disassociated from the instance, but\n remains allocated to your account.
\n[Subnets in Wavelength Zones] You can associate an IP address from the telecommunication\n carrier to the instance or network interface.
\nYou cannot associate an Elastic IP address with an interface in a different network border group.
\nThis is an idempotent operation. If you perform the operation more than once, Amazon EC2\n doesn't return an error, and you may be charged for each time the Elastic IP address is\n remapped to the same instance. For more information, see the Elastic IP\n Addresses section of Amazon EC2\n Pricing.
\n[EC2-VPC] The allocation ID. This is required for EC2-VPC.
" + "smithy.api#documentation": "The allocation ID. This is required.
" } }, "InstanceId": { "target": "com.amazonaws.ec2#InstanceId", "traits": { - "smithy.api#documentation": "The ID of the instance. The instance must have exactly one attached network interface.\n For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both.\n For EC2-Classic, you must specify an instance ID and the instance must be in the running\n state.
" + "smithy.api#documentation": "The ID of the instance. The instance must have exactly one attached network interface.\n You can specify either the instance ID or the network interface ID, but not both.
" } }, "PublicIp": { "target": "com.amazonaws.ec2#EipAllocationPublicIp", "traits": { - "smithy.api#documentation": "[EC2-Classic] The Elastic IP address to associate with the instance. This is required for\n EC2-Classic.
" + "smithy.api#documentation": "Deprecated.
" } }, "AllowReassociation": { @@ -5950,7 +6000,7 @@ "aws.protocols#ec2QueryName": "AllowReassociation", "smithy.api#clientOptional": {}, "smithy.api#default": false, - "smithy.api#documentation": "[EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic IP address that is already associated with an instance or network interface to be reassociated with the specified instance or network interface. Otherwise, the operation fails. In a VPC in an EC2-VPC-only account, reassociation is automatic, therefore you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.
", + "smithy.api#documentation": "Reassociation is automatic, but you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.
", "smithy.api#xmlName": "allowReassociation" } }, @@ -5968,7 +6018,7 @@ "target": "com.amazonaws.ec2#NetworkInterfaceId", "traits": { "aws.protocols#ec2QueryName": "NetworkInterfaceId", - "smithy.api#documentation": "[EC2-VPC] The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.
\nFor EC2-VPC, you can specify either the instance ID or the network interface ID, but not both.
", + "smithy.api#documentation": "The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.
\nYou can specify either the instance ID or the network interface ID, but not both.
", "smithy.api#xmlName": "networkInterfaceId" } }, @@ -5976,7 +6026,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "PrivateIpAddress", - "smithy.api#documentation": "[EC2-VPC] The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.
", + "smithy.api#documentation": "The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.
", "smithy.api#xmlName": "privateIpAddress" } } @@ -5992,10 +6042,13 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "AssociationId", - "smithy.api#documentation": "[EC2-VPC] The ID that represents the association of the Elastic IP address with an instance.
", + "smithy.api#documentation": "The ID that represents the association of the Elastic IP address with an instance.
", "smithy.api#xmlName": "associationId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateClientVpnTargetNetwork": { @@ -6068,6 +6121,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateDhcpOptions": { @@ -6187,6 +6243,9 @@ "smithy.api#xmlName": "encryptionKmsKeyId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateIamInstanceProfile": { @@ -6236,6 +6295,9 @@ "smithy.api#xmlName": "iamInstanceProfileAssociation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateInstanceEventWindow": { @@ -6293,6 +6355,9 @@ "smithy.api#xmlName": "instanceEventWindow" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateIpamResourceDiscovery": { @@ -6364,6 +6429,9 @@ "smithy.api#xmlName": "ipamResourceDiscoveryAssociation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateNatGatewayAddress": { @@ -6437,6 +6505,9 @@ "smithy.api#xmlName": "natGatewayAddressSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateRouteTable": { @@ -6512,6 +6583,9 @@ "smithy.api#xmlName": "associationState" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateSubnetCidrBlock": { @@ -6573,6 +6647,9 @@ "smithy.api#xmlName": "subnetId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateTransitGatewayMulticastDomain": { @@ -6638,6 +6715,9 @@ "smithy.api#xmlName": "associations" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateTransitGatewayPolicyTable": { @@ -6695,6 +6775,9 @@ "smithy.api#xmlName": "association" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateTransitGatewayRouteTable": { @@ -6752,6 +6835,9 @@ "smithy.api#xmlName": "association" } } + }, + "traits": { + "smithy.api#output": {} } }, 
"com.amazonaws.ec2#AssociateTrunkInterface": { @@ -6840,6 +6926,9 @@ "smithy.api#xmlName": "clientToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociateVpcCidrBlock": { @@ -6957,6 +7046,9 @@ "smithy.api#xmlName": "vpcId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AssociatedNetworkType": { @@ -7243,6 +7335,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AttachInternetGateway": { @@ -7394,7 +7489,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of AttachNetworkInterface.
" + "smithy.api#documentation": "Contains the output of AttachNetworkInterface.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#AttachVerifiedAccessTrustProvider": { @@ -7467,6 +7563,9 @@ "smithy.api#xmlName": "verifiedAccessInstance" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AttachVolume": { @@ -7583,7 +7682,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of AttachVpnGateway.
" + "smithy.api#documentation": "Contains the output of AttachVpnGateway.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#AttachmentEnaSrdSpecification": { @@ -7842,6 +7942,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AuthorizeSecurityGroupEgress": { @@ -7972,6 +8075,9 @@ "smithy.api#xmlName": "securityGroupRuleSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AuthorizeSecurityGroupIngress": { @@ -8090,6 +8196,9 @@ "smithy.api#xmlName": "securityGroupRuleSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#AutoAcceptSharedAssociationsValue": { @@ -8738,7 +8847,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of BundleInstance.
" + "smithy.api#documentation": "Contains the output of BundleInstance.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#BundleTask": { @@ -9102,7 +9212,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of CancelBundleTask.
" + "smithy.api#documentation": "Contains the output of CancelBundleTask.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CancelCapacityReservation": { @@ -9203,6 +9314,9 @@ "smithy.api#xmlName": "failedFleetCancellationSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CancelCapacityReservationRequest": { @@ -9242,6 +9356,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CancelConversionRequest": { @@ -9371,6 +9488,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CancelImportTask": { @@ -9440,6 +9560,9 @@ "smithy.api#xmlName": "state" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CancelReservedInstancesListing": { @@ -9486,7 +9609,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of CancelReservedInstancesListing.
" + "smithy.api#documentation": "Contains the output of CancelReservedInstancesListing.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CancelSpotFleetRequests": { @@ -9752,7 +9876,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of CancelSpotInstanceRequests.
" + "smithy.api#documentation": "Contains the output of CancelSpotInstanceRequests.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CancelledSpotInstanceRequest": { @@ -12200,6 +12325,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ConnectionLogOptions": { @@ -12577,6 +12705,9 @@ "smithy.api#xmlName": "fpgaImageId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CopyImage": { @@ -12691,7 +12822,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of CopyImage.
" + "smithy.api#documentation": "Contains the output of CopyImage.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CopySnapshot": { @@ -12818,6 +12950,9 @@ "smithy.api#xmlName": "tagSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CopyTagsFromSource": { @@ -13135,6 +13270,9 @@ "smithy.api#xmlName": "tagSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateCapacityReservationRequest": { @@ -13265,6 +13403,9 @@ "smithy.api#xmlName": "capacityReservation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateCarrierGateway": { @@ -13328,6 +13469,9 @@ "smithy.api#xmlName": "carrierGateway" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateClientVpnEndpoint": { @@ -13505,6 +13649,9 @@ "smithy.api#xmlName": "dnsName" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateClientVpnRoute": { @@ -13583,6 +13730,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateCoipCidr": { @@ -13640,6 +13790,9 @@ "smithy.api#xmlName": "coipCidr" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateCoipPool": { @@ -13696,6 +13849,9 @@ "smithy.api#xmlName": "coipPool" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateCustomerGateway": { @@ -13789,7 +13945,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of CreateCustomerGateway.
" + "smithy.api#documentation": "Contains the output of CreateCustomerGateway.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateDefaultSubnet": { @@ -13847,6 +14004,9 @@ "smithy.api#xmlName": "subnet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateDefaultVpc": { @@ -13888,6 +14048,9 @@ "smithy.api#xmlName": "vpc" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateDhcpOptions": { @@ -13948,6 +14111,9 @@ "smithy.api#xmlName": "dhcpOptions" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateEgressOnlyInternetGateway": { @@ -14018,6 +14184,9 @@ "smithy.api#xmlName": "egressOnlyInternetGateway" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateFleet": { @@ -14268,6 +14437,9 @@ "smithy.api#xmlName": "fleetInstanceSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateFlowLogs": { @@ -14411,6 +14583,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateFpgaImage": { @@ -14499,6 +14674,9 @@ "smithy.api#xmlName": "fpgaImageGlobalId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateImage": { @@ -14595,6 +14773,98 @@ "smithy.api#xmlName": "imageId" } } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ec2#CreateInstanceConnectEndpoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#CreateInstanceConnectEndpointRequest" + }, + "output": { + "target": "com.amazonaws.ec2#CreateInstanceConnectEndpointResult" + }, + "traits": { + "smithy.api#documentation": "Creates an EC2 Instance Connect Endpoint.
\nAn EC2 Instance Connect Endpoint allows you to connect to a resource, without\n requiring the resource to have a public IPv4 address. For more information, see Connect to your resources without requiring a public IPv4 address using EC2\n Instance Connect Endpoint in the Amazon EC2 User\n Guide.
" + } + }, + "com.amazonaws.ec2#CreateInstanceConnectEndpointRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation
. \n Otherwise, it is UnauthorizedOperation
.
The ID of the subnet in which to create the EC2 Instance Connect Endpoint.
", + "smithy.api#required": {} + } + }, + "SecurityGroupIds": { + "target": "com.amazonaws.ec2#SecurityGroupIdStringListRequest", + "traits": { + "smithy.api#documentation": "One or more security groups to associate with the endpoint. If you don't specify a security group, \n the default security group for your VPC will be associated with the endpoint.
", + "smithy.api#xmlName": "SecurityGroupId" + } + }, + "PreserveClientIp": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "Indicates whether your client's IP address is preserved as the source. The value is true
or false
.
If true
, your client's IP address is used when you connect to a resource.
If false
, the elastic network interface IP address is used when you connect to a resource.
Default: true
\n
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
", + "smithy.api#idempotencyToken": {} + } + }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "The tags to apply to the EC2 Instance Connect Endpoint during creation.
", + "smithy.api#xmlName": "TagSpecification" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#CreateInstanceConnectEndpointResult": { + "type": "structure", + "members": { + "InstanceConnectEndpoint": { + "target": "com.amazonaws.ec2#Ec2InstanceConnectEndpoint", + "traits": { + "aws.protocols#ec2QueryName": "InstanceConnectEndpoint", + "smithy.api#documentation": "Information about the EC2 Instance Connect Endpoint.
", + "smithy.api#xmlName": "instanceConnectEndpoint" + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "ClientToken", + "smithy.api#documentation": "Unique, case-sensitive idempotency token provided by the client in the the request.
", + "smithy.api#xmlName": "clientToken" + } + } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateInstanceEventWindow": { @@ -14662,6 +14932,9 @@ "smithy.api#xmlName": "instanceEventWindow" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateInstanceExportTask": { @@ -14740,6 +15013,9 @@ "smithy.api#xmlName": "exportTask" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateInternetGateway": { @@ -14790,6 +15066,9 @@ "smithy.api#xmlName": "internetGateway" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateIpam": { @@ -14950,6 +15229,9 @@ "smithy.api#xmlName": "ipamPool" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateIpamRequest": { @@ -15061,6 +15343,9 @@ "smithy.api#xmlName": "ipamResourceDiscovery" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateIpamResult": { @@ -15074,6 +15359,9 @@ "smithy.api#xmlName": "ipam" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateIpamScope": { @@ -15143,6 +15431,9 @@ "smithy.api#xmlName": "ipamScope" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateKeyPair": { @@ -15284,6 +15575,9 @@ "smithy.api#xmlName": "warning" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateLaunchTemplateVersion": { @@ -15379,6 +15673,9 @@ "smithy.api#xmlName": "warning" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateLocalGatewayRoute": { @@ -15452,6 +15749,9 @@ "smithy.api#xmlName": "route" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateLocalGatewayRouteTable": { @@ -15514,6 +15814,9 @@ "smithy.api#xmlName": "localGatewayRouteTable" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation": { @@ -15578,6 +15881,9 @@ "smithy.api#xmlName": 
"localGatewayRouteTableVirtualInterfaceGroupAssociation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateLocalGatewayRouteTableVpcAssociation": { @@ -15642,6 +15948,9 @@ "smithy.api#xmlName": "localGatewayRouteTableVpcAssociation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateManagedPrefixList": { @@ -15729,6 +16038,9 @@ "smithy.api#xmlName": "prefixList" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateNatGateway": { @@ -15840,6 +16152,9 @@ "smithy.api#xmlName": "natGateway" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateNetworkAcl": { @@ -16013,6 +16328,9 @@ "smithy.api#xmlName": "networkAcl" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateNetworkInsightsAccessScope": { @@ -16092,6 +16410,9 @@ "smithy.api#xmlName": "networkInsightsAccessScopeContent" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateNetworkInsightsPath": { @@ -16203,6 +16524,9 @@ "smithy.api#xmlName": "networkInsightsPath" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateNetworkInterface": { @@ -16287,7 +16611,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of CreateNetworkInterfacePermission.
" + "smithy.api#documentation": "Contains the output of CreateNetworkInterfacePermission.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateNetworkInterfaceRequest": { @@ -16395,7 +16720,7 @@ "InterfaceType": { "target": "com.amazonaws.ec2#NetworkInterfaceCreationType", "traits": { - "smithy.api#documentation": "The type of network interface. The default is interface
.
The only supported values are efa
and trunk
.
The type of network interface. The default is interface
.
The only supported values are interface
, efa
, and trunk
.
Contains the output of CreateReservedInstancesListing.
" + "smithy.api#documentation": "Contains the output of CreateReservedInstancesListing.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateRestoreImageTask": { @@ -16805,6 +17143,9 @@ "smithy.api#xmlName": "imageId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateRoute": { @@ -16960,6 +17301,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateRouteTable": { @@ -17020,6 +17364,9 @@ "smithy.api#xmlName": "routeTable" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateSecurityGroup": { @@ -17101,6 +17448,9 @@ "smithy.api#xmlName": "tagSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateSnapshot": { @@ -17232,6 +17582,9 @@ "smithy.api#xmlName": "snapshotSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateSpotDatafeedSubscription": { @@ -17296,7 +17649,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of CreateSpotDatafeedSubscription.
" + "smithy.api#documentation": "Contains the output of CreateSpotDatafeedSubscription.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateStoreImageTask": { @@ -17361,6 +17715,9 @@ "smithy.api#xmlName": "objectKey" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateSubnet": { @@ -17451,6 +17808,9 @@ "smithy.api#xmlName": "subnetCidrReservation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateSubnetRequest": { @@ -17535,6 +17895,9 @@ "smithy.api#xmlName": "subnet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTags": { @@ -17652,6 +18015,9 @@ "smithy.api#xmlName": "clientToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTrafficMirrorFilterRule": { @@ -17783,6 +18149,9 @@ "smithy.api#xmlName": "clientToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTrafficMirrorSession": { @@ -17901,6 +18270,9 @@ "smithy.api#xmlName": "clientToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTrafficMirrorTarget": { @@ -17988,6 +18360,9 @@ "smithy.api#xmlName": "clientToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGateway": { @@ -18096,6 +18471,9 @@ "smithy.api#xmlName": "transitGatewayConnectPeer" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayConnectRequest": { @@ -18164,6 +18542,9 @@ "smithy.api#xmlName": "transitGatewayConnect" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayMulticastDomain": { @@ -18252,6 +18633,9 @@ "smithy.api#xmlName": "transitGatewayMulticastDomain" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayPeeringAttachment": { @@ -18352,6 +18736,9 @@ "smithy.api#xmlName": "transitGatewayPeeringAttachment" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayPolicyTable": { @@ -18407,6 +18794,9 @@ "smithy.api#xmlName": 
"transitGatewayPolicyTable" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayPrefixListReference": { @@ -18478,6 +18868,9 @@ "smithy.api#xmlName": "transitGatewayPrefixListReference" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayRequest": { @@ -18526,6 +18919,9 @@ "smithy.api#xmlName": "transitGateway" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayRoute": { @@ -18597,6 +18993,9 @@ "smithy.api#xmlName": "route" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayRouteTable": { @@ -18673,6 +19072,9 @@ "smithy.api#xmlName": "transitGatewayRouteTableAnnouncement" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayRouteTableRequest": { @@ -18716,6 +19118,9 @@ "smithy.api#xmlName": "transitGatewayRouteTable" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateTransitGatewayVpcAttachment": { @@ -18819,6 +19224,9 @@ "smithy.api#xmlName": "transitGatewayVpcAttachment" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVerifiedAccessEndpoint": { @@ -19016,6 +19424,9 @@ "smithy.api#xmlName": "verifiedAccessEndpoint" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVerifiedAccessEndpointSubnetIdList": { @@ -19100,6 +19511,9 @@ "smithy.api#xmlName": "verifiedAccessGroup" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVerifiedAccessInstance": { @@ -19161,6 +19575,9 @@ "smithy.api#xmlName": "verifiedAccessInstance" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVerifiedAccessTrustProvider": { @@ -19326,6 +19743,9 @@ "smithy.api#xmlName": "verifiedAccessTrustProvider" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVolume": { @@ -19604,6 +20024,9 @@ "smithy.api#xmlName": "clientToken" } 
} + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVpcEndpointRequest": { @@ -19723,6 +20146,9 @@ "smithy.api#xmlName": "clientToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVpcEndpointServiceConfiguration": { @@ -19820,6 +20246,9 @@ "smithy.api#xmlName": "clientToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVpcPeeringConnection": { @@ -19902,6 +20331,9 @@ "smithy.api#xmlName": "vpcPeeringConnection" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVpcRequest": { @@ -20006,6 +20438,9 @@ "smithy.api#xmlName": "vpc" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVpnConnection": { @@ -20095,7 +20530,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of CreateVpnConnection.
" + "smithy.api#documentation": "Contains the output of CreateVpnConnection.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreateVpnConnectionRoute": { @@ -20208,7 +20644,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of CreateVpnGateway.
" + "smithy.api#documentation": "Contains the output of CreateVpnGateway.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#CreditSpecification": { @@ -20630,6 +21067,9 @@ "smithy.api#xmlName": "carrierGateway" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteClientVpnEndpoint": { @@ -20679,6 +21119,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteClientVpnRoute": { @@ -20742,6 +21185,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteCoipCidr": { @@ -20799,6 +21245,9 @@ "smithy.api#xmlName": "coipCidr" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteCoipPool": { @@ -20848,6 +21297,9 @@ "smithy.api#xmlName": "coipPool" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteCustomerGateway": { @@ -20976,6 +21428,9 @@ "smithy.api#xmlName": "returnCode" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteFleetError": { @@ -21170,6 +21625,9 @@ "smithy.api#xmlName": "unsuccessfulFleetDeletionSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteFlowLogs": { @@ -21220,6 +21678,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteFpgaImage": { @@ -21271,6 +21732,61 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ec2#DeleteInstanceConnectEndpoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DeleteInstanceConnectEndpointRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DeleteInstanceConnectEndpointResult" + }, + "traits": { + "smithy.api#documentation": "Deletes the specified EC2 Instance Connect Endpoint.
" + } + }, + "com.amazonaws.ec2#DeleteInstanceConnectEndpointRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation
. \n Otherwise, it is UnauthorizedOperation
.
The ID of the EC2 Instance Connect Endpoint to delete.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#DeleteInstanceConnectEndpointResult": { + "type": "structure", + "members": { + "InstanceConnectEndpoint": { + "target": "com.amazonaws.ec2#Ec2InstanceConnectEndpoint", + "traits": { + "aws.protocols#ec2QueryName": "InstanceConnectEndpoint", + "smithy.api#documentation": "Information about the EC2 Instance Connect Endpoint.
", + "smithy.api#xmlName": "instanceConnectEndpoint" + } + } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteInstanceEventWindow": { @@ -21329,6 +21845,9 @@ "smithy.api#xmlName": "instanceEventWindowState" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteInternetGateway": { @@ -21430,6 +21949,9 @@ "smithy.api#xmlName": "ipamPool" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteIpamRequest": { @@ -21511,6 +22033,9 @@ "smithy.api#xmlName": "ipamResourceDiscovery" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteIpamResult": { @@ -21524,6 +22049,9 @@ "smithy.api#xmlName": "ipam" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteIpamScope": { @@ -21573,6 +22101,9 @@ "smithy.api#xmlName": "ipamScope" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteKeyPair": { @@ -21668,6 +22199,9 @@ "smithy.api#xmlName": "launchTemplate" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteLaunchTemplateVersions": { @@ -21832,6 +22366,9 @@ "smithy.api#xmlName": "unsuccessfullyDeletedLaunchTemplateVersionSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteLocalGatewayRoute": { @@ -21893,6 +22430,9 @@ "smithy.api#xmlName": "route" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteLocalGatewayRouteTable": { @@ -21942,6 +22482,9 @@ "smithy.api#xmlName": "localGatewayRouteTable" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation": { @@ -21991,6 +22534,9 @@ "smithy.api#xmlName": "localGatewayRouteTableVirtualInterfaceGroupAssociation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteLocalGatewayRouteTableVpcAssociation": { @@ -22040,6 +22586,9 @@ "smithy.api#xmlName": "localGatewayRouteTableVpcAssociation" } } + }, + 
"traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteManagedPrefixList": { @@ -22089,6 +22638,9 @@ "smithy.api#xmlName": "prefixList" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteNatGateway": { @@ -22139,6 +22691,9 @@ "smithy.api#xmlName": "natGatewayId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteNetworkAcl": { @@ -22302,6 +22857,9 @@ "smithy.api#xmlName": "networkInsightsAccessScopeAnalysisId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteNetworkInsightsAccessScopeRequest": { @@ -22339,6 +22897,9 @@ "smithy.api#xmlName": "networkInsightsAccessScopeId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteNetworkInsightsAnalysis": { @@ -22388,6 +22949,9 @@ "smithy.api#xmlName": "networkInsightsAnalysisId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteNetworkInsightsPath": { @@ -22437,6 +23001,9 @@ "smithy.api#xmlName": "networkInsightsPathId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteNetworkInterface": { @@ -22511,7 +23078,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output for DeleteNetworkInterfacePermission.
" + "smithy.api#documentation": "Contains the output for DeleteNetworkInterfacePermission.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteNetworkInterfaceRequest": { @@ -22632,6 +23200,9 @@ "smithy.api#xmlName": "returnValue" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteQueuedReservedInstances": { @@ -22752,6 +23323,9 @@ "smithy.api#xmlName": "failedQueuedPurchaseDeletionSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteRoute": { @@ -23026,6 +23600,9 @@ "smithy.api#xmlName": "deletedSubnetCidrReservation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteSubnetRequest": { @@ -23149,6 +23726,9 @@ "smithy.api#xmlName": "trafficMirrorFilterId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTrafficMirrorFilterRule": { @@ -23198,6 +23778,9 @@ "smithy.api#xmlName": "trafficMirrorFilterRuleId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTrafficMirrorSession": { @@ -23247,6 +23830,9 @@ "smithy.api#xmlName": "trafficMirrorSessionId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTrafficMirrorTarget": { @@ -23296,6 +23882,9 @@ "smithy.api#xmlName": "trafficMirrorTargetId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGateway": { @@ -23369,6 +23958,9 @@ "smithy.api#xmlName": "transitGatewayConnectPeer" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGatewayConnectRequest": { @@ -23406,6 +23998,9 @@ "smithy.api#xmlName": "transitGatewayConnect" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGatewayMulticastDomain": { @@ -23455,6 +24050,9 @@ "smithy.api#xmlName": "transitGatewayMulticastDomain" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGatewayPeeringAttachment": { @@ -23504,6 +24102,9 @@ "smithy.api#xmlName": "transitGatewayPeeringAttachment" } } + }, + "traits": { + "smithy.api#output": {} } }, 
"com.amazonaws.ec2#DeleteTransitGatewayPolicyTable": { @@ -23553,6 +24154,9 @@ "smithy.api#xmlName": "transitGatewayPolicyTable" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGatewayPrefixListReference": { @@ -23610,6 +24214,9 @@ "smithy.api#xmlName": "transitGatewayPrefixListReference" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGatewayRequest": { @@ -23647,6 +24254,9 @@ "smithy.api#xmlName": "transitGateway" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGatewayRoute": { @@ -23704,6 +24314,9 @@ "smithy.api#xmlName": "route" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGatewayRouteTable": { @@ -23765,6 +24378,9 @@ "smithy.api#xmlName": "transitGatewayRouteTableAnnouncement" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGatewayRouteTableRequest": { @@ -23802,6 +24418,9 @@ "smithy.api#xmlName": "transitGatewayRouteTable" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteTransitGatewayVpcAttachment": { @@ -23851,6 +24470,9 @@ "smithy.api#xmlName": "transitGatewayVpcAttachment" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteVerifiedAccessEndpoint": { @@ -23907,6 +24529,9 @@ "smithy.api#xmlName": "verifiedAccessEndpoint" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteVerifiedAccessGroup": { @@ -23963,6 +24588,9 @@ "smithy.api#xmlName": "verifiedAccessGroup" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteVerifiedAccessInstance": { @@ -24019,6 +24647,9 @@ "smithy.api#xmlName": "verifiedAccessInstance" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteVerifiedAccessTrustProvider": { @@ -24075,6 +24706,9 @@ "smithy.api#xmlName": "verifiedAccessTrustProvider" } } + }, + "traits": { + "smithy.api#output": {} } }, 
"com.amazonaws.ec2#DeleteVolume": { @@ -24175,6 +24809,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteVpcEndpointServiceConfigurations": { @@ -24225,6 +24862,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteVpcEndpoints": { @@ -24275,6 +24915,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteVpcPeeringConnection": { @@ -24330,6 +24973,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeleteVpcRequest": { @@ -24520,6 +25166,9 @@ "smithy.api#xmlName": "byoipCidr" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeprovisionIpamPoolCidr": { @@ -24575,6 +25224,9 @@ "smithy.api#xmlName": "ipamPoolCidr" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeprovisionPublicIpv4PoolCidr": { @@ -24640,6 +25292,9 @@ "smithy.api#xmlName": "deprovisionedAddressSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeprovisionedAddressSet": { @@ -24716,7 +25371,9 @@ "InstanceTagAttribute": { "target": "com.amazonaws.ec2#DeregisterInstanceTagAttributeRequest", "traits": { - "smithy.api#documentation": "Information about the tag keys to deregister.
" + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "Information about the tag keys to deregister.
", + "smithy.api#required": {} } } }, @@ -24735,6 +25392,9 @@ "smithy.api#xmlName": "instanceTagAttribute" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeregisterInstanceTagAttributeRequest": { @@ -24817,6 +25477,9 @@ "smithy.api#xmlName": "deregisteredMulticastGroupMembers" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DeregisterTransitGatewayMulticastGroupSources": { @@ -24876,6 +25539,9 @@ "smithy.api#xmlName": "deregisteredMulticastGroupSources" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeAccountAttributes": { @@ -24927,6 +25593,9 @@ "smithy.api#xmlName": "accountAttributeSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeAddressTransfers": { @@ -24938,7 +25607,7 @@ "target": "com.amazonaws.ec2#DescribeAddressTransfersResult" }, "traits": { - "smithy.api#documentation": "Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.
", + "smithy.api#documentation": "Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.
\nWhen you transfer an Elastic IP address, there is a two-step handshake\n between the source and transfer Amazon Web Services accounts. When the source account starts the transfer,\n the transfer account has seven days to accept the Elastic IP address\n transfer. During those seven days, the source account can view the\n pending transfer by using this action. After seven days, the\n transfer expires and ownership of the Elastic IP\n address returns to the source\n account. Accepted transfers are visible to the source account for three days\n after the transfers have been accepted.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -25013,6 +25682,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeAddresses": { @@ -25024,7 +25696,7 @@ "target": "com.amazonaws.ec2#DescribeAddressesResult" }, "traits": { - "smithy.api#documentation": "Describes the specified Elastic IP addresses or all of your Elastic IP addresses.
\nAn Elastic IP address is for use in either the EC2-Classic platform or in a VPC.\n\t\t\t\tFor more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.
\nWe are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.
\nDescribes the specified Elastic IP addresses or all of your Elastic IP addresses.
" } }, "com.amazonaws.ec2#DescribeAddressesAttribute": { @@ -25107,6 +25779,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeAddressesRequest": { @@ -25115,7 +25790,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "One or more filters. Filter names and values are case-sensitive.
\n\n allocation-id
- [EC2-VPC] The allocation ID for the address.
\n association-id
- [EC2-VPC] The association ID for the address.
\n domain
- Indicates whether the address is for use in EC2-Classic (standard
) \n or in a VPC (vpc
).
\n instance-id
- The ID of the instance the address is associated with, if any.
\n network-border-group
- A unique set of Availability Zones, Local Zones,\n or Wavelength Zones from where Amazon Web Services advertises IP addresses.
\n network-interface-id
- [EC2-VPC] The ID of the network interface that the address is associated with, if any.
\n network-interface-owner-id
- The Amazon Web Services account ID of the owner.
\n private-ip-address
- [EC2-VPC] The private IP address associated with the Elastic IP address.
\n public-ip
- The Elastic IP address, or the carrier IP address.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
One or more filters. Filter names and values are case-sensitive.
\n\n allocation-id
- The allocation ID for the address.
\n association-id
- The association ID for the address.
\n instance-id
- The ID of the instance the address is associated with, if any.
\n network-border-group
- A unique set of Availability Zones, Local Zones,\n or Wavelength Zones from where Amazon Web Services advertises IP addresses.
\n network-interface-id
- The ID of the network interface that the address is associated with, if any.
\n network-interface-owner-id
- The Amazon Web Services account ID of the owner.
\n private-ip-address
- The private IP address associated with the Elastic IP address.
\n public-ip
- The Elastic IP address, or the carrier IP address.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
[EC2-VPC] Information about the allocation IDs.
", + "smithy.api#documentation": "Information about the allocation IDs.
", "smithy.api#xmlName": "AllocationId" } }, @@ -25159,6 +25834,9 @@ "smithy.api#xmlName": "addressesSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeAggregateIdFormat": { @@ -25210,6 +25888,9 @@ "smithy.api#xmlName": "statusSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeAvailabilityZones": { @@ -25282,6 +25963,9 @@ "smithy.api#xmlName": "availabilityZoneInfo" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeAwsNetworkPerformanceMetricSubscriptions": { @@ -25358,6 +26042,9 @@ "smithy.api#xmlName": "subscriptionSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeBundleTasks": { @@ -25442,6 +26129,9 @@ "smithy.api#xmlName": "bundleInstanceTasksSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeByoipCidrs": { @@ -25522,6 +26212,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeCapacityReservationFleets": { @@ -25615,6 +26308,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeCapacityReservations": { @@ -25708,6 +26404,9 @@ "smithy.api#xmlName": "capacityReservationSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeCarrierGateways": { @@ -25791,6 +26490,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeClassicLinkInstances": { @@ -25890,6 +26592,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeClientVpnAuthorizationRules": { @@ -25984,6 +26689,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeClientVpnConnections": { @@ -26078,6 +26786,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + 
"smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeClientVpnEndpointMaxResults": { @@ -26171,6 +26882,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeClientVpnRoutes": { @@ -26265,6 +26979,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeClientVpnTargetNetworks": { @@ -26365,6 +27082,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeCoipPools": { @@ -26448,6 +27168,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeConversionTaskList": { @@ -26575,6 +27298,9 @@ "smithy.api#xmlName": "conversionTasks" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeCustomerGateways": { @@ -26672,7 +27398,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeCustomerGateways.
" + "smithy.api#documentation": "Contains the output of DescribeCustomerGateways.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeDhcpOptions": { @@ -26768,6 +27495,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeEgressOnlyInternetGateways": { @@ -26861,6 +27591,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeElasticGpus": { @@ -26958,6 +27691,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeExportImageTasks": { @@ -27051,6 +27787,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeExportTasks": { @@ -27131,6 +27870,9 @@ "smithy.api#xmlName": "exportTaskSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeFastLaunchImages": { @@ -27221,6 +27963,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeFastLaunchImagesSuccessItem": { @@ -27500,6 +28245,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeFleetError": { @@ -27649,6 +28397,9 @@ "smithy.api#xmlName": "startTime" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeFleetInstances": { @@ -27735,6 +28486,9 @@ "smithy.api#xmlName": "fleetId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeFleets": { @@ -27884,6 +28638,9 @@ "smithy.api#xmlName": "fleetSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeFlowLogs": { @@ -27966,6 +28723,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeFpgaImageAttribute": { @@ -28023,6 +28783,9 @@ "smithy.api#xmlName": "fpgaImageAttribute" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeFpgaImages": { @@ -28123,6 
+28886,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeHostReservationOfferings": { @@ -28212,6 +28978,9 @@ "smithy.api#xmlName": "offeringSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeHostReservations": { @@ -28295,6 +29064,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeHosts": { @@ -28376,6 +29148,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeIamInstanceProfileAssociations": { @@ -28461,6 +29236,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeIdFormat": { @@ -28500,6 +29278,9 @@ "smithy.api#xmlName": "statusSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeIdentityIdFormat": { @@ -28551,6 +29332,9 @@ "smithy.api#xmlName": "statusSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeImageAttribute": { @@ -28756,6 +29540,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeImportImageTasks": { @@ -28839,6 +29626,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeImportSnapshotTasks": { @@ -28949,6 +29739,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeInstanceAttribute": { @@ -29001,6 +29794,92 @@ "smithy.api#input": {} } }, + "com.amazonaws.ec2#DescribeInstanceConnectEndpoints": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DescribeInstanceConnectEndpointsRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DescribeInstanceConnectEndpointsResult" + }, + "traits": { + "smithy.api#documentation": "Describes the specified EC2 Instance Connect 
Endpoints or all EC2 Instance Connect Endpoints.
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "InstanceConnectEndpoints", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.ec2#DescribeInstanceConnectEndpointsRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation
. \n Otherwise, it is UnauthorizedOperation
.
The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.
" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#NextToken", + "traits": { + "smithy.api#documentation": "The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.
" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "One or more filters.
\n\n instance-connect-endpoint-id
- The ID of the EC2 Instance Connect Endpoint.
\n state
- The state of the EC2 Instance Connect Endpoint (create-in-progress
| create-complete
| create-failed
| \n delete-in-progress
| delete-complete
| delete-failed
).
\n subnet-id
- The ID of the subnet in which the EC2 Instance\n Connect Endpoint was created.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
\n tag-value
- The value of a tag assigned to the resource. Use this filter to find all resources \n that have a tag with a specific value, regardless of tag key.
\n vpc-id
- The ID of the VPC in which the EC2 Instance Connect\n Endpoint was created.
One or more EC2 Instance Connect Endpoint IDs.
", + "smithy.api#xmlName": "InstanceConnectEndpointId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#DescribeInstanceConnectEndpointsResult": { + "type": "structure", + "members": { + "InstanceConnectEndpoints": { + "target": "com.amazonaws.ec2#InstanceConnectEndpointSet", + "traits": { + "aws.protocols#ec2QueryName": "InstanceConnectEndpointSet", + "smithy.api#documentation": "Information about the EC2 Instance Connect Endpoints.
", + "smithy.api#xmlName": "instanceConnectEndpointSet" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#NextToken", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "The token to include in another request to get the next page of items. This value is null
when there\n are no more items to return.
Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, or that are being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.
", + "smithy.api#documentation": "This action is deprecated.
\nDescribes your Elastic IP addresses that are being moved from or being restored to the EC2-Classic platform. \n This request does not return information about any other Elastic IP addresses in your account.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -31365,6 +32316,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeNatGateways": { @@ -31534,6 +32488,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeNetworkAcls": { @@ -31629,6 +32586,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeNetworkInsightsAccessScopeAnalyses": { @@ -31730,6 +32690,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeNetworkInsightsAccessScopes": { @@ -31813,6 +32776,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeNetworkInsightsAnalyses": { @@ -31914,6 +32880,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeNetworkInsightsPaths": { @@ -31997,6 +32966,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeNetworkInterfaceAttribute": { @@ -32093,7 +33065,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeNetworkInterfaceAttribute.
" + "smithy.api#documentation": "Contains the output of DescribeNetworkInterfaceAttribute.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeNetworkInterfacePermissions": { @@ -32182,7 +33155,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output for DescribeNetworkInterfacePermissions.
" + "smithy.api#documentation": "Contains the output for DescribeNetworkInterfacePermissions.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeNetworkInterfaces": { @@ -32306,6 +33280,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribePlacementGroups": { @@ -32371,6 +33348,9 @@ "smithy.api#xmlName": "placementGroupSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribePrefixLists": { @@ -32454,6 +33434,9 @@ "smithy.api#xmlName": "prefixListSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribePrincipalIdFormat": { @@ -32540,6 +33523,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribePublicIpv4Pools": { @@ -32615,6 +33601,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeRegions": { @@ -32680,6 +33669,9 @@ "smithy.api#xmlName": "regionInfo" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeReplaceRootVolumeTasks": { @@ -32773,6 +33765,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeReservedInstances": { @@ -32844,7 +33839,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeReservedInstancesListings.
" + "smithy.api#documentation": "Contains the output of DescribeReservedInstancesListings.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeReservedInstancesModifications": { @@ -32916,7 +33912,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeReservedInstancesModifications.
" + "smithy.api#documentation": "Contains the output of DescribeReservedInstancesModifications.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeReservedInstancesOfferings": { @@ -33081,7 +34078,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeReservedInstancesOfferings.
" + "smithy.api#documentation": "Contains the output of DescribeReservedInstancesOfferings.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeReservedInstancesRequest": { @@ -33144,7 +34142,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output for DescribeReservedInstances.
" + "smithy.api#documentation": "Contains the output for DescribeReservedInstances.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeRouteTables": { @@ -33242,7 +34241,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeRouteTables.
" + "smithy.api#documentation": "Contains the output of DescribeRouteTables.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeScheduledInstanceAvailability": { @@ -33364,7 +34364,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeScheduledInstanceAvailability.
" + "smithy.api#documentation": "Contains the output of DescribeScheduledInstanceAvailability.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeScheduledInstances": { @@ -33457,7 +34458,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeScheduledInstances.
" + "smithy.api#documentation": "Contains the output of DescribeScheduledInstances.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSecurityGroupReferences": { @@ -33507,6 +34509,9 @@ "smithy.api#xmlName": "securityGroupReferenceSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSecurityGroupRules": { @@ -33600,6 +34605,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSecurityGroups": { @@ -33728,6 +34736,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSnapshotAttribute": { @@ -33803,6 +34814,9 @@ "smithy.api#xmlName": "snapshotId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSnapshotTierStatus": { @@ -33880,6 +34894,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSnapshots": { @@ -34006,6 +35023,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSpotDatafeedSubscription": { @@ -34052,7 +35072,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeSpotDatafeedSubscription.
" + "smithy.api#documentation": "Contains the output of DescribeSpotDatafeedSubscription.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSpotFleetInstances": { @@ -34543,7 +35564,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeSpotInstanceRequests.
" + "smithy.api#documentation": "Contains the output of DescribeSpotInstanceRequests.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSpotPriceHistory": { @@ -34667,7 +35689,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeSpotPriceHistory.
" + "smithy.api#documentation": "Contains the output of DescribeSpotPriceHistory.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeStaleSecurityGroups": { @@ -34764,6 +35787,9 @@ "smithy.api#xmlName": "staleSecurityGroupSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeStoreImageTasks": { @@ -34857,6 +35883,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeSubnets": { @@ -34969,6 +35998,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTags": { @@ -35051,6 +36083,9 @@ "smithy.api#xmlName": "tagSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTrafficMirrorFilters": { @@ -35134,6 +36169,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTrafficMirrorSessions": { @@ -35217,6 +36255,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTrafficMirrorTargets": { @@ -35300,6 +36341,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGatewayAttachments": { @@ -35382,6 +36426,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGatewayConnectPeers": { @@ -35464,6 +36511,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGatewayConnects": { @@ -35546,6 +36596,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGatewayMulticastDomains": { @@ -35628,6 +36681,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGatewayPeeringAttachments": { @@ -35710,6 +36766,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + 
"smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGatewayPolicyTables": { @@ -35792,6 +36851,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGatewayRouteTableAnnouncements": { @@ -35874,6 +36936,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGatewayRouteTables": { @@ -35956,6 +37021,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGatewayVpcAttachments": { @@ -36038,6 +37106,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTransitGateways": { @@ -36120,6 +37191,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeTrunkInterfaceAssociations": { @@ -36213,6 +37287,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVerifiedAccessEndpoints": { @@ -36318,6 +37395,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVerifiedAccessGroupMaxResults": { @@ -36417,6 +37497,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVerifiedAccessInstanceLoggingConfigurations": { @@ -36510,6 +37593,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVerifiedAccessInstances": { @@ -36603,6 +37689,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVerifiedAccessTrustProviders": { @@ -36696,6 +37785,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVolumeAttribute": { @@ -36771,6 +37863,9 @@ 
"smithy.api#xmlName": "volumeId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVolumeStatus": { @@ -36856,6 +37951,9 @@ "smithy.api#xmlName": "volumeStatusSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVolumes": { @@ -37033,6 +38131,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVolumesRequest": { @@ -37104,6 +38205,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcAttribute": { @@ -37187,6 +38291,9 @@ "smithy.api#xmlName": "enableNetworkAddressUsageMetrics" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcClassicLink": { @@ -37290,6 +38397,9 @@ "smithy.api#xmlName": "vpcs" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcClassicLinkRequest": { @@ -37335,6 +38445,9 @@ "smithy.api#xmlName": "vpcSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcEndpointConnectionNotifications": { @@ -37417,6 +38530,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcEndpointConnections": { @@ -37493,6 +38609,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcEndpointServiceConfigurations": { @@ -37576,6 +38695,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcEndpointServicePermissions": { @@ -37660,6 +38782,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcEndpointServices": { @@ -37745,6 +38870,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcEndpoints": { @@ -37828,6 +38956,9 @@ "smithy.api#xmlName": 
"nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcPeeringConnections": { @@ -37966,6 +39097,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpcs": { @@ -38098,6 +39232,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpnConnections": { @@ -38220,7 +39357,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeVpnConnections.
" + "smithy.api#documentation": "Contains the output of DescribeVpnConnections.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DescribeVpnGateways": { @@ -38281,7 +39419,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of DescribeVpnGateways.
" + "smithy.api#documentation": "Contains the output of DescribeVpnGateways.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#DestinationFileFormat": { @@ -38430,6 +39569,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DetachInternetGateway": { @@ -38603,6 +39745,9 @@ "smithy.api#xmlName": "verifiedAccessInstance" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DetachVolume": { @@ -38940,6 +40085,9 @@ "smithy.api#xmlName": "addressTransfer" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableAwsNetworkPerformanceMetricSubscription": { @@ -39007,6 +40155,9 @@ "smithy.api#xmlName": "output" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableEbsEncryptionByDefault": { @@ -39050,6 +40201,9 @@ "smithy.api#xmlName": "ebsEncryptionByDefault" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableFastLaunch": { @@ -39173,6 +40327,9 @@ "smithy.api#xmlName": "stateTransitionTime" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableFastSnapshotRestoreErrorItem": { @@ -39435,6 +40592,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableImageDeprecation": { @@ -39486,6 +40646,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableIpamOrganizationAdminAccount": { @@ -39537,6 +40700,9 @@ "smithy.api#xmlName": "success" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableSerialConsoleAccess": { @@ -39580,6 +40746,9 @@ "smithy.api#xmlName": "serialConsoleAccessEnabled" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableTransitGatewayRouteTablePropagation": { @@ -39641,6 +40810,9 @@ "smithy.api#xmlName": "propagation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableVgwRoutePropagation": { @@ -39740,6 +40912,9 @@ "smithy.api#xmlName": "return" } } 
+ }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisableVpcClassicLinkRequest": { @@ -39783,6 +40958,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateAddress": { @@ -39794,7 +40972,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "Disassociates an Elastic IP address from the instance or network interface it's associated with.
\nAn Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more\n\t\t\tinformation, see Elastic IP\n\t\t\t\tAddresses in the Amazon Elastic Compute Cloud User Guide.
\nWe are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.
\nThis is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.
" + "smithy.api#documentation": "Disassociates an Elastic IP address from the instance or network interface it's associated with.
\nThis is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.
" } }, "com.amazonaws.ec2#DisassociateAddressRequest": { @@ -39803,13 +40981,13 @@ "AssociationId": { "target": "com.amazonaws.ec2#ElasticIpAssociationId", "traits": { - "smithy.api#documentation": "[EC2-VPC] The association ID. Required for EC2-VPC.
" + "smithy.api#documentation": "The association ID. This parameter is required.
" } }, "PublicIp": { "target": "com.amazonaws.ec2#EipAllocationPublicIp", "traits": { - "smithy.api#documentation": "[EC2-Classic] The Elastic IP address. Required for EC2-Classic.
" + "smithy.api#documentation": "Deprecated.
" } }, "DryRun": { @@ -39890,6 +41068,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateEnclaveCertificateIamRole": { @@ -39949,6 +41130,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateIamInstanceProfile": { @@ -39990,6 +41174,9 @@ "smithy.api#xmlName": "iamInstanceProfileAssociation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateInstanceEventWindow": { @@ -40047,6 +41234,9 @@ "smithy.api#xmlName": "instanceEventWindow" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateIpamResourceDiscovery": { @@ -40096,6 +41286,9 @@ "smithy.api#xmlName": "ipamResourceDiscoveryAssociation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateNatGatewayAddress": { @@ -40170,6 +41363,9 @@ "smithy.api#xmlName": "natGatewayAddressSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateRouteTable": { @@ -40261,6 +41457,9 @@ "smithy.api#xmlName": "subnetId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateTransitGatewayMulticastDomain": { @@ -40326,6 +41525,9 @@ "smithy.api#xmlName": "associations" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateTransitGatewayPolicyTable": { @@ -40383,6 +41585,9 @@ "smithy.api#xmlName": "association" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateTransitGatewayRouteTable": { @@ -40440,6 +41645,9 @@ "smithy.api#xmlName": "association" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateTrunkInterface": { @@ -40506,6 +41714,9 @@ "smithy.api#xmlName": "clientToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DisassociateVpcCidrBlock": { @@ -40565,6 +41776,9 @@ "smithy.api#xmlName": "vpcId" } } + }, + 
"traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#DiskCount": { @@ -41341,6 +42555,177 @@ } } }, + "com.amazonaws.ec2#Ec2InstanceConnectEndpoint": { + "type": "structure", + "members": { + "OwnerId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "OwnerId", + "smithy.api#documentation": "The ID of the Amazon Web Services account that created the EC2 Instance Connect Endpoint.
", + "smithy.api#xmlName": "ownerId" + } + }, + "InstanceConnectEndpointId": { + "target": "com.amazonaws.ec2#InstanceConnectEndpointId", + "traits": { + "aws.protocols#ec2QueryName": "InstanceConnectEndpointId", + "smithy.api#documentation": "The ID of the EC2 Instance Connect Endpoint.
", + "smithy.api#xmlName": "instanceConnectEndpointId" + } + }, + "InstanceConnectEndpointArn": { + "target": "com.amazonaws.ec2#ResourceArn", + "traits": { + "aws.protocols#ec2QueryName": "InstanceConnectEndpointArn", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the EC2 Instance Connect Endpoint.
", + "smithy.api#xmlName": "instanceConnectEndpointArn" + } + }, + "State": { + "target": "com.amazonaws.ec2#Ec2InstanceConnectEndpointState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "The current state of the EC2 Instance Connect Endpoint.
", + "smithy.api#xmlName": "state" + } + }, + "StateMessage": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "StateMessage", + "smithy.api#documentation": "The message for the current state of the EC2 Instance Connect Endpoint. \n Can include a failure message.
", + "smithy.api#xmlName": "stateMessage" + } + }, + "DnsName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "DnsName", + "smithy.api#documentation": "The DNS name of the EC2 Instance Connect Endpoint.
", + "smithy.api#xmlName": "dnsName" + } + }, + "FipsDnsName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "FipsDnsName", + "smithy.api#documentation": "", + "smithy.api#xmlName": "fipsDnsName" + } + }, + "NetworkInterfaceIds": { + "target": "com.amazonaws.ec2#NetworkInterfaceIdSet", + "traits": { + "aws.protocols#ec2QueryName": "NetworkInterfaceIdSet", + "smithy.api#documentation": "The ID of the elastic network interface that Amazon EC2 automatically created when creating the EC2\n Instance Connect Endpoint.
", + "smithy.api#xmlName": "networkInterfaceIdSet" + } + }, + "VpcId": { + "target": "com.amazonaws.ec2#VpcId", + "traits": { + "aws.protocols#ec2QueryName": "VpcId", + "smithy.api#documentation": "The ID of the VPC in which the EC2 Instance Connect Endpoint was created.
", + "smithy.api#xmlName": "vpcId" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "The Availability Zone of the EC2 Instance Connect Endpoint.
", + "smithy.api#xmlName": "availabilityZone" + } + }, + "CreatedAt": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "CreatedAt", + "smithy.api#documentation": "The date and time that the EC2 Instance Connect Endpoint was created.
", + "smithy.api#xmlName": "createdAt" + } + }, + "SubnetId": { + "target": "com.amazonaws.ec2#SubnetId", + "traits": { + "aws.protocols#ec2QueryName": "SubnetId", + "smithy.api#documentation": "The ID of the subnet in which the EC2 Instance Connect Endpoint was created.
", + "smithy.api#xmlName": "subnetId" + } + }, + "PreserveClientIp": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "PreserveClientIp", + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "Indicates whether your client's IP address is preserved as the source. The value is true
or false
.
If true
, your client's IP address is used when you connect to a resource.
If false
, the elastic network interface IP address is used when you connect to a resource.
Default: true
\n
The security groups associated with the endpoint. If you didn't specify a security group, \n the default security group for your VPC is associated with the endpoint.
", + "smithy.api#xmlName": "securityGroupIdSet" + } + }, + "Tags": { + "target": "com.amazonaws.ec2#TagList", + "traits": { + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "The tags assigned to the EC2 Instance Connect Endpoint.
", + "smithy.api#xmlName": "tagSet" + } + } + }, + "traits": { + "smithy.api#documentation": "The EC2 Instance Connect Endpoint.
" + } + }, + "com.amazonaws.ec2#Ec2InstanceConnectEndpointState": { + "type": "enum", + "members": { + "create_in_progress": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "create-in-progress" + } + }, + "create_complete": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "create-complete" + } + }, + "create_failed": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "create-failed" + } + }, + "delete_in_progress": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "delete-in-progress" + } + }, + "delete_complete": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "delete-complete" + } + }, + "delete_failed": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "delete-failed" + } + } + } + }, "com.amazonaws.ec2#EfaInfo": { "type": "structure", "members": { @@ -41875,6 +43260,9 @@ "smithy.api#xmlName": "addressTransfer" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableAwsNetworkPerformanceMetricSubscription": { @@ -41942,6 +43330,9 @@ "smithy.api#xmlName": "output" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableEbsEncryptionByDefault": { @@ -41985,6 +43376,9 @@ "smithy.api#xmlName": "ebsEncryptionByDefault" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableFastLaunch": { @@ -42126,6 +43520,9 @@ "smithy.api#xmlName": "stateTransitionTime" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableFastSnapshotRestoreErrorItem": { @@ -42388,6 +43785,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableImageDeprecation": { @@ -42447,6 +43847,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableIpamOrganizationAdminAccount": { @@ -42498,6 +43901,9 @@ "smithy.api#xmlName": "success" } } + }, + 
"traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableReachabilityAnalyzerOrganizationSharing": { @@ -42541,6 +43947,9 @@ "smithy.api#xmlName": "returnValue" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableSerialConsoleAccess": { @@ -42584,6 +43993,9 @@ "smithy.api#xmlName": "serialConsoleAccessEnabled" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableTransitGatewayRouteTablePropagation": { @@ -42645,6 +44057,9 @@ "smithy.api#xmlName": "propagation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableVgwRoutePropagation": { @@ -42784,6 +44199,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnableVpcClassicLinkRequest": { @@ -42827,6 +44245,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#EnclaveOptions": { @@ -43573,6 +44994,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ExportClientVpnClientConfiguration": { @@ -43622,6 +45046,9 @@ "smithy.api#xmlName": "clientConfiguration" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ExportEnvironment": { @@ -43808,6 +45235,9 @@ "smithy.api#xmlName": "tagSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ExportImageTask": { @@ -44205,6 +45635,9 @@ "smithy.api#xmlName": "s3Location" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ExportVmTaskId": { @@ -46328,6 +47761,9 @@ "smithy.api#xmlName": "associatedRoleSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetAssociatedIpv6PoolCidrs": { @@ -46405,6 +47841,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetAwsNetworkPerformanceData": { @@ -46493,6 +47932,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + 
"smithy.api#output": {} } }, "com.amazonaws.ec2#GetCapacityReservationUsage": { @@ -46618,6 +48060,9 @@ "smithy.api#xmlName": "instanceUsageSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetCoipPoolUsage": { @@ -46704,6 +48149,9 @@ "smithy.api#xmlName": "localGatewayRouteTableId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetConsoleOutput": { @@ -46779,6 +48227,9 @@ "smithy.api#xmlName": "timestamp" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetConsoleScreenshot": { @@ -46844,6 +48295,9 @@ "smithy.api#xmlName": "instanceId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetDefaultCreditSpecification": { @@ -46893,6 +48347,9 @@ "smithy.api#xmlName": "instanceFamilyCreditSpecification" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetEbsDefaultKmsKeyId": { @@ -46934,6 +48391,9 @@ "smithy.api#xmlName": "kmsKeyId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetEbsEncryptionByDefault": { @@ -46977,6 +48437,9 @@ "smithy.api#xmlName": "ebsEncryptionByDefault" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetFlowLogsIntegrationTemplate": { @@ -47043,6 +48506,9 @@ "smithy.api#xmlName": "result" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetGroupsForCapacityReservation": { @@ -47130,6 +48596,9 @@ "smithy.api#xmlName": "capacityReservationGroupSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetHostReservationPurchasePreview": { @@ -47203,6 +48672,9 @@ "smithy.api#xmlName": "totalUpfrontPrice" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetInstanceTypesFromInstanceRequirements": { @@ -47298,6 +48770,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetInstanceUefiData": { @@ -47356,6 +48831,9 @@ "smithy.api#xmlName": "uefiData" } } 
+ }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetIpamAddressHistory": { @@ -47459,6 +48937,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetIpamDiscoveredAccounts": { @@ -47551,6 +49032,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetIpamDiscoveredResourceCidrs": { @@ -47643,6 +49127,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetIpamPoolAllocations": { @@ -47654,7 +49141,7 @@ "target": "com.amazonaws.ec2#GetIpamPoolAllocationsResult" }, "traits": { - "smithy.api#documentation": "Get a list of all the CIDR allocations in an IPAM pool.
\nIf you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.
\nGet a list of all the CIDR allocations in an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.
\nIf you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.
\nContains the output of GetReservedInstancesExchangeQuote.
" + "smithy.api#documentation": "Contains the output of GetReservedInstancesExchangeQuote.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetSerialConsoleAccessStatus": { @@ -48558,6 +50073,9 @@ "smithy.api#xmlName": "serialConsoleAccessEnabled" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetSpotPlacementScores": { @@ -48670,6 +50188,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetSubnetCidrReservations": { @@ -48766,6 +50287,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetTransitGatewayAttachmentPropagations": { @@ -48850,6 +50374,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetTransitGatewayMulticastDomainAssociations": { @@ -48934,6 +50461,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetTransitGatewayPolicyTableAssociations": { @@ -49018,6 +50548,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetTransitGatewayPolicyTableEntries": { @@ -49088,6 +50621,9 @@ "smithy.api#xmlName": "transitGatewayPolicyTableEntries" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetTransitGatewayPrefixListReferences": { @@ -49172,6 +50708,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetTransitGatewayRouteTableAssociations": { @@ -49256,6 +50795,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetTransitGatewayRouteTablePropagations": { @@ -49340,6 +50882,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetVerifiedAccessEndpointPolicy": { @@ -49399,6 +50944,9 @@ "smithy.api#xmlName": "policyDocument" } } + }, + "traits": { + "smithy.api#output": {} } }, 
"com.amazonaws.ec2#GetVerifiedAccessGroupPolicy": { @@ -49458,6 +51006,9 @@ "smithy.api#xmlName": "policyDocument" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetVpnConnectionDeviceSampleConfiguration": { @@ -49521,6 +51072,9 @@ "smithy.api#xmlName": "vpnConnectionDeviceSampleConfiguration" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetVpnConnectionDeviceTypes": { @@ -49588,6 +51142,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GetVpnTunnelReplacementStatus": { @@ -49685,6 +51242,9 @@ "smithy.api#xmlName": "maintenanceDetails" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#GpuDeviceCount": { @@ -51566,6 +53126,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ImportImage": { @@ -51871,6 +53434,9 @@ "smithy.api#xmlName": "usageOperation" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ImportImageTask": { @@ -52202,6 +53768,9 @@ "smithy.api#xmlName": "conversionTask" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ImportInstanceTaskDetails": { @@ -52411,6 +53980,9 @@ "smithy.api#xmlName": "tagSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ImportManifestUrl": { @@ -52533,6 +54105,9 @@ "smithy.api#xmlName": "tagSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ImportSnapshotTask": { @@ -52687,6 +54262,9 @@ "smithy.api#xmlName": "conversionTask" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ImportVolumeTaskDetails": { @@ -53680,6 +55258,28 @@ "smithy.api#documentation": "Information about the number of instances that can be launched onto the Dedicated\n Host.
" } }, + "com.amazonaws.ec2#InstanceConnectEndpointId": { + "type": "string" + }, + "com.amazonaws.ec2#InstanceConnectEndpointMaxResults": { + "type": "integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.ec2#InstanceConnectEndpointSet": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#Ec2InstanceConnectEndpoint", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#InstanceCount": { "type": "structure", "members": { @@ -65563,6 +67163,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ListSnapshotsInRecycleBin": { @@ -65646,6 +67249,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ListingState": { @@ -67074,6 +68680,9 @@ "smithy.api#xmlName": "address" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyAvailabilityZoneGroup": { @@ -67133,6 +68742,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyAvailabilityZoneOptInStatus": { @@ -67235,6 +68847,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyCapacityReservationRequest": { @@ -67308,6 +68923,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyClientVpnEndpoint": { @@ -67438,6 +69056,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyDefaultCreditSpecification": { @@ -67495,6 +69116,9 @@ "smithy.api#xmlName": "instanceFamilyCreditSpecification" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyEbsDefaultKmsKeyId": { @@ -67544,6 +69168,9 @@ "smithy.api#xmlName": "kmsKeyId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyFleet": { @@ 
-67620,6 +69247,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyFpgaImageAttribute": { @@ -67720,6 +69350,9 @@ "smithy.api#xmlName": "fpgaImageAttribute" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyHosts": { @@ -67803,6 +69436,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyIdFormat": { @@ -68215,6 +69851,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyInstanceCreditSpecification": { @@ -68279,6 +69918,9 @@ "smithy.api#xmlName": "unsuccessfulInstanceCreditSpecificationSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyInstanceEventStartTime": { @@ -68344,6 +69986,9 @@ "smithy.api#xmlName": "event" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyInstanceEventWindow": { @@ -68412,6 +70057,9 @@ "smithy.api#xmlName": "instanceEventWindow" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyInstanceMaintenanceOptions": { @@ -68475,6 +70123,9 @@ "smithy.api#xmlName": "autoRecovery" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyInstanceMetadataOptions": { @@ -68564,6 +70215,9 @@ "smithy.api#xmlName": "instanceMetadataOptions" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyInstancePlacement": { @@ -68659,6 +70313,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyIpam": { @@ -68780,6 +70437,9 @@ "smithy.api#xmlName": "ipamPool" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyIpamRequest": { @@ -68912,6 +70572,9 @@ "smithy.api#xmlName": "ipamResourceCidr" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyIpamResourceDiscovery": { @@ -68981,6 +70644,9 @@ 
"smithy.api#xmlName": "ipamResourceDiscovery" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyIpamResult": { @@ -68994,6 +70660,9 @@ "smithy.api#xmlName": "ipam" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyIpamScope": { @@ -69049,6 +70718,9 @@ "smithy.api#xmlName": "ipamScope" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyLaunchTemplate": { @@ -69115,6 +70787,9 @@ "smithy.api#xmlName": "launchTemplate" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyLocalGatewayRoute": { @@ -69188,6 +70863,9 @@ "smithy.api#xmlName": "route" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyManagedPrefixList": { @@ -69273,6 +70951,9 @@ "smithy.api#xmlName": "prefixList" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyNetworkInterfaceAttribute": { @@ -69424,6 +71105,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyReservedInstances": { @@ -69486,7 +71170,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of ModifyReservedInstances.
" + "smithy.api#documentation": "Contains the output of ModifyReservedInstances.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifySecurityGroupRules": { @@ -69547,6 +71232,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifySnapshotAttribute": { @@ -69680,6 +71368,9 @@ "smithy.api#xmlName": "tieringStartTime" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifySpotFleetRequest": { @@ -69924,6 +71615,9 @@ "smithy.api#xmlName": "trafficMirrorFilter" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyTrafficMirrorFilterRule": { @@ -70038,6 +71732,9 @@ "smithy.api#xmlName": "trafficMirrorFilterRule" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyTrafficMirrorSession": { @@ -70136,6 +71833,9 @@ "smithy.api#xmlName": "trafficMirrorSession" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyTransitGateway": { @@ -70289,6 +71989,9 @@ "smithy.api#xmlName": "transitGatewayPrefixListReference" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyTransitGatewayRequest": { @@ -70338,6 +72041,9 @@ "smithy.api#xmlName": "transitGateway" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyTransitGatewayVpcAttachment": { @@ -70431,6 +72137,9 @@ "smithy.api#xmlName": "transitGatewayVpcAttachment" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVerifiedAccessEndpoint": { @@ -70575,6 +72284,9 @@ "smithy.api#xmlName": "policyDocument" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVerifiedAccessEndpointRequest": { @@ -70643,6 +72355,9 @@ "smithy.api#xmlName": "verifiedAccessEndpoint" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVerifiedAccessEndpointSubnetIdList": { @@ -70745,6 +72460,9 @@ "smithy.api#xmlName": "policyDocument" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVerifiedAccessGroupRequest": 
{ @@ -70801,6 +72519,9 @@ "smithy.api#xmlName": "verifiedAccessGroup" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVerifiedAccessInstance": { @@ -70877,6 +72598,9 @@ "smithy.api#xmlName": "loggingConfiguration" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVerifiedAccessInstanceRequest": { @@ -70927,6 +72651,9 @@ "smithy.api#xmlName": "verifiedAccessInstance" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVerifiedAccessTrustProvider": { @@ -71045,6 +72772,9 @@ "smithy.api#xmlName": "verifiedAccessTrustProvider" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVolume": { @@ -71176,6 +72906,9 @@ "smithy.api#xmlName": "volumeModification" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpcAttribute": { @@ -71299,6 +73032,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpcEndpointRequest": { @@ -71414,6 +73150,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpcEndpointServiceConfiguration": { @@ -71529,6 +73268,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpcEndpointServicePayerResponsibility": { @@ -71588,6 +73330,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpcEndpointServicePermissions": { @@ -71659,6 +73404,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpcPeeringConnectionOptions": { @@ -71728,6 +73476,9 @@ "smithy.api#xmlName": "requesterPeeringConnectionOptions" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpcTenancy": { @@ -71787,6 +73538,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, 
"com.amazonaws.ec2#ModifyVpnConnection": { @@ -71872,6 +73626,9 @@ "smithy.api#xmlName": "vpnConnection" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpnConnectionRequest": { @@ -71927,6 +73684,9 @@ "smithy.api#xmlName": "vpnConnection" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpnTunnelCertificate": { @@ -71984,6 +73744,9 @@ "smithy.api#xmlName": "vpnConnection" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpnTunnelOptions": { @@ -72057,6 +73820,9 @@ "smithy.api#xmlName": "vpnConnection" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ModifyVpnTunnelOptionsSpecification": { @@ -72258,6 +74024,9 @@ "smithy.api#xmlName": "instancesSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#Monitoring": { @@ -72314,7 +74083,7 @@ "target": "com.amazonaws.ec2#MoveAddressToVpcResult" }, "traits": { - "smithy.api#documentation": "Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The\n Elastic IP address must be allocated to your account for more than 24 hours, and it must not\n be associated with an instance. After the Elastic IP address is moved, it is no longer\n available for use in the EC2-Classic platform, unless you move it back using the\n RestoreAddressToClassic request. You cannot move an Elastic IP address that was\n originally allocated for use in the EC2-VPC platform to the EC2-Classic platform.
\nWe are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.
\nThis action is deprecated.
\nMoves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The\n Elastic IP address must be allocated to your account for more than 24 hours, and it must not\n be associated with an instance. After the Elastic IP address is moved, it is no longer\n available for use in the EC2-Classic platform, unless you move it back using the\n RestoreAddressToClassic request. You cannot move an Elastic IP address that was\n originally allocated for use in the EC2-VPC platform to the EC2-Classic platform.
" } }, "com.amazonaws.ec2#MoveAddressToVpcRequest": { @@ -72364,6 +74133,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#MoveByoipCidrToIpam": { @@ -72429,6 +74201,9 @@ "smithy.api#xmlName": "byoipCidr" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#MoveStatus": { @@ -72455,7 +74230,7 @@ "target": "com.amazonaws.ec2#MoveStatus", "traits": { "aws.protocols#ec2QueryName": "MoveStatus", - "smithy.api#documentation": "The status of the Elastic IP address that's being moved to the EC2-VPC platform, or restored to the EC2-Classic platform.
", + "smithy.api#documentation": "The status of the Elastic IP address that's being moved or restored.
", "smithy.api#xmlName": "moveStatus" } }, @@ -72469,7 +74244,7 @@ } }, "traits": { - "smithy.api#documentation": "Describes the status of a moving Elastic IP address.
\nWe are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.
\nThis action is deprecated.
\nDescribes the status of a moving Elastic IP address.
" } }, "com.amazonaws.ec2#MovingAddressStatusSet": { @@ -74263,6 +76038,15 @@ } } }, + "com.amazonaws.ec2#NetworkInterfaceIdSet": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#NetworkInterfaceIpv6Address": { "type": "structure", "members": { @@ -77333,6 +79117,9 @@ "smithy.api#xmlName": "byoipCidr" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ProvisionIpamPoolCidr": { @@ -77409,6 +79196,9 @@ "smithy.api#xmlName": "ipamPoolCidr" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ProvisionPublicIpv4PoolCidr": { @@ -77483,6 +79273,9 @@ "smithy.api#xmlName": "poolAddressRange" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ProvisionedBandwidth": { @@ -77894,6 +79687,9 @@ "smithy.api#xmlName": "totalUpfrontPrice" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#PurchaseRequest": { @@ -78010,7 +79806,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of PurchaseReservedInstancesOffering.
" + "smithy.api#documentation": "Contains the output of PurchaseReservedInstancesOffering.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#PurchaseScheduledInstances": { @@ -78071,7 +79868,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of PurchaseScheduledInstances.
" + "smithy.api#documentation": "Contains the output of PurchaseScheduledInstances.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#PurchaseSet": { @@ -78492,7 +80290,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of RegisterImage.
" + "smithy.api#documentation": "Contains the output of RegisterImage.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#RegisterInstanceEventNotificationAttributes": { @@ -78521,7 +80320,9 @@ "InstanceTagAttribute": { "target": "com.amazonaws.ec2#RegisterInstanceTagAttributeRequest", "traits": { - "smithy.api#documentation": "Information about the tag keys to register.
" + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "Information about the tag keys to register.
", + "smithy.api#required": {} } } }, @@ -78540,6 +80341,9 @@ "smithy.api#xmlName": "instanceTagAttribute" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RegisterInstanceTagAttributeRequest": { @@ -78626,6 +80430,9 @@ "smithy.api#xmlName": "registeredMulticastGroupMembers" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RegisterTransitGatewayMulticastGroupSources": { @@ -78689,6 +80496,9 @@ "smithy.api#xmlName": "registeredMulticastGroupSources" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RejectTransitGatewayMulticastDomainAssociations": { @@ -78748,6 +80558,9 @@ "smithy.api#xmlName": "associations" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RejectTransitGatewayPeeringAttachment": { @@ -78797,6 +80610,9 @@ "smithy.api#xmlName": "transitGatewayPeeringAttachment" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RejectTransitGatewayVpcAttachment": { @@ -78846,6 +80662,9 @@ "smithy.api#xmlName": "transitGatewayVpcAttachment" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RejectVpcEndpointConnections": { @@ -78904,6 +80723,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RejectVpcPeeringConnection": { @@ -78959,6 +80781,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ReleaseAddress": { @@ -78970,7 +80795,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "Releases the specified Elastic IP address.
\n[EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it\n\t\t\t\tfrom any instance that it's associated with. To disassociate an Elastic IP address without\n\t\t\t\treleasing it, use DisassociateAddress.
\nWe are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.
\n[Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address\n\t\t\t before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse
).
After releasing an Elastic IP address, it is released to the IP address pool. \n Be sure to update your DNS records and any servers or devices that communicate with the address. \n If you attempt to release an Elastic IP address that you already released, you'll get an\n AuthFailure
error if the address is already allocated to another Amazon Web Services account.
[EC2-VPC] After you release an Elastic IP address for use in a VPC, you might be able to recover it.\n For more information, see AllocateAddress.
\nFor more\n information, see Elastic IP\n Addresses in the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Releases the specified Elastic IP address.
\n[Default VPC] Releasing an Elastic IP address automatically disassociates it\n\t\t\t\tfrom any instance that it's associated with. To disassociate an Elastic IP address without\n\t\t\t\treleasing it, use DisassociateAddress.
\n[Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address\n\t\t\t before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse
).
After releasing an Elastic IP address, it is released to the IP address pool. \n Be sure to update your DNS records and any servers or devices that communicate with the address. \n If you attempt to release an Elastic IP address that you already released, you'll get an\n AuthFailure
error if the address is already allocated to another Amazon Web Services account.
After you release an Elastic IP address, you might be able to recover it.\n For more information, see AllocateAddress.
" } }, "com.amazonaws.ec2#ReleaseAddressRequest": { @@ -78979,13 +80804,13 @@ "AllocationId": { "target": "com.amazonaws.ec2#AllocationId", "traits": { - "smithy.api#documentation": "[EC2-VPC] The allocation ID. Required for EC2-VPC.
" + "smithy.api#documentation": "The allocation ID. This parameter is required.
" } }, "PublicIp": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "[EC2-Classic] The Elastic IP address. Required for EC2-Classic.
" + "smithy.api#documentation": "Deprecated.
" } }, "NetworkBorderGroup": { @@ -79058,6 +80883,9 @@ "smithy.api#xmlName": "unsuccessful" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ReleaseIpamPoolAllocation": { @@ -79069,7 +80897,7 @@ "target": "com.amazonaws.ec2#ReleaseIpamPoolAllocationResult" }, "traits": { - "smithy.api#documentation": "Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.\n
\nAll EC2 API actions follow an eventual consistency model.
\nRelease an allocation within an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.\n
\nAll EC2 API actions follow an eventual consistency model.
\nContains the output of RequestSpotInstances.
" + "smithy.api#documentation": "Contains the output of RequestSpotInstances.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#RequestSpotLaunchSpecification": { @@ -81721,6 +83568,9 @@ "smithy.api#xmlName": "address" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ResetEbsDefaultKmsKeyId": { @@ -81762,6 +83612,9 @@ "smithy.api#xmlName": "kmsKeyId" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ResetFpgaImageAttribute": { @@ -81830,6 +83683,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ResetImageAttribute": { @@ -82617,6 +84473,12 @@ "traits": { "smithy.api#enumValue": "ipam-resource-discovery-association" } + }, + "instance_connect_endpoint": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "instance-connect-endpoint" + } } } }, @@ -82939,7 +84801,7 @@ "target": "com.amazonaws.ec2#RestoreAddressToClassicResult" }, "traits": { - "smithy.api#documentation": "Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.
\nWe are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.
\nThis action is deprecated.
\nRestores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.
" } }, "com.amazonaws.ec2#RestoreAddressToClassicRequest": { @@ -82989,6 +84851,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RestoreImageFromRecycleBin": { @@ -83040,6 +84905,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RestoreManagedPrefixListVersion": { @@ -83107,6 +84975,9 @@ "smithy.api#xmlName": "prefixList" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RestoreSnapshotFromRecycleBin": { @@ -83232,6 +85103,9 @@ "smithy.api#xmlName": "volumeSize" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RestoreSnapshotTier": { @@ -83326,6 +85200,9 @@ "smithy.api#xmlName": "isPermanentRestore" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ResultRange": { @@ -83407,6 +85284,9 @@ "smithy.api#xmlName": "status" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RevokeSecurityGroupEgress": { @@ -83537,6 +85417,9 @@ "smithy.api#xmlName": "unknownIpPermissionSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RevokeSecurityGroupIngress": { @@ -83655,6 +85538,9 @@ "smithy.api#xmlName": "unknownIpPermissionSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#RoleId": { @@ -84604,7 +86490,8 @@ } }, "traits": { - "smithy.api#documentation": "Contains the output of RunScheduledInstances.
" + "smithy.api#documentation": "Contains the output of RunScheduledInstances.
", + "smithy.api#output": {} } }, "com.amazonaws.ec2#S3ObjectTag": { @@ -85548,6 +87435,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#SearchTransitGatewayMulticastGroups": { @@ -85632,6 +87522,9 @@ "smithy.api#xmlName": "nextToken" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#SearchTransitGatewayRoutes": { @@ -85708,6 +87601,9 @@ "smithy.api#xmlName": "additionalRoutesAvailable" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#SecurityGroup": { @@ -85794,6 +87690,15 @@ } } }, + "com.amazonaws.ec2#SecurityGroupIdSet": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#SecurityGroupId", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#SecurityGroupIdStringList": { "type": "list", "member": { @@ -85803,6 +87708,21 @@ } } }, + "com.amazonaws.ec2#SecurityGroupIdStringListRequest": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#SecurityGroupId", + "traits": { + "smithy.api#xmlName": "SecurityGroupId" + } + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 16 + } + } + }, "com.amazonaws.ec2#SecurityGroupIdentifier": { "type": "structure", "members": { @@ -88890,6 +90810,9 @@ "smithy.api#xmlName": "instancesSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#StartNetworkInsightsAccessScopeAnalysis": { @@ -88955,6 +90878,9 @@ "smithy.api#xmlName": "networkInsightsAccessScopeAnalysis" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#StartNetworkInsightsAnalysis": { @@ -89034,6 +90960,9 @@ "smithy.api#xmlName": "networkInsightsAnalysis" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#StartVpcEndpointServicePrivateDnsVerification": { @@ -89085,6 +91014,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#State": { @@ -89323,6 +91255,9 @@ "smithy.api#xmlName": 
"instancesSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#Storage": { @@ -90684,6 +92619,9 @@ "smithy.api#xmlName": "connectionStatuses" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#TerminateConnectionStatus": { @@ -90777,6 +92715,9 @@ "smithy.api#xmlName": "instancesSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ThreadsPerCore": { @@ -94882,6 +96823,9 @@ "smithy.api#xmlName": "unassignedIpv6PrefixSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#UnassignPrivateIpAddresses": { @@ -95002,6 +96946,9 @@ "smithy.api#xmlName": "natGatewayAddressSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#UnlimitedSupportedInstanceFamily": { @@ -95083,6 +97030,9 @@ "smithy.api#xmlName": "instancesSet" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#UnsuccessfulInstanceCreditSpecificationErrorCode": { @@ -95303,6 +97253,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#UpdateSecurityGroupRuleDescriptionsIngress": { @@ -95371,6 +97324,9 @@ "smithy.api#xmlName": "return" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#UsageClassType": { @@ -99699,6 +101655,9 @@ "smithy.api#xmlName": "byoipCidr" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.ec2#ZoneIdStringList": { diff --git a/aws/sdk/aws-models/iam.json b/aws/sdk/aws-models/iam.json index 8453b370fcdb0e6cc699ebb836d0c06ebb36c8ce..3f222206c623eb12b1bec2ad5ece45974c145ce2 100644 --- a/aws/sdk/aws-models/iam.json +++ b/aws/sdk/aws-models/iam.json @@ -2576,6 +2576,9 @@ "target": "smithy.api#Unit" }, "errors": [ + { + "target": "com.amazonaws.iam#ConcurrentModificationException" + }, { "target": "com.amazonaws.iam#EntityAlreadyExistsException" }, @@ -2587,7 +2590,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an alias for your Amazon Web Services 
account. For information about using an Amazon Web Services account\n alias, see Using an\n alias for your Amazon Web Services account ID in the\n IAM User Guide.
" + "smithy.api#documentation": "Creates an alias for your Amazon Web Services account. For information about using an Amazon Web Services account\n alias, see Creating, deleting, and\n listing an Amazon Web Services account alias in the Amazon Web Services Sign-In User\n Guide.
" } }, "com.amazonaws.iam#CreateAccountAliasRequest": { @@ -3574,6 +3577,9 @@ "target": "smithy.api#Unit" }, "errors": [ + { + "target": "com.amazonaws.iam#ConcurrentModificationException" + }, { "target": "com.amazonaws.iam#EntityTemporarilyUnmodifiableException" }, @@ -3666,6 +3672,9 @@ "target": "smithy.api#Unit" }, "errors": [ + { + "target": "com.amazonaws.iam#ConcurrentModificationException" + }, { "target": "com.amazonaws.iam#LimitExceededException" }, @@ -3677,7 +3686,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes the specified Amazon Web Services account alias. For information about using an Amazon Web Services\n account alias, see Using an alias for your Amazon Web Services account ID in the\n IAM User Guide.
" + "smithy.api#documentation": "Deletes the specified Amazon Web Services account alias. For information about using an Amazon Web Services\n account alias, see Creating, deleting, and\n listing an Amazon Web Services account alias in the Amazon Web Services Sign-In User\n Guide.
" } }, "com.amazonaws.iam#DeleteAccountAliasRequest": { @@ -4390,6 +4399,9 @@ "target": "smithy.api#Unit" }, "errors": [ + { + "target": "com.amazonaws.iam#ConcurrentModificationException" + }, { "target": "com.amazonaws.iam#LimitExceededException" }, @@ -4558,6 +4570,9 @@ "target": "smithy.api#Unit" }, "errors": [ + { + "target": "com.amazonaws.iam#ConcurrentModificationException" + }, { "target": "com.amazonaws.iam#DeleteConflictException" }, @@ -4838,6 +4853,9 @@ "target": "smithy.api#Unit" }, "errors": [ + { + "target": "com.amazonaws.iam#ConcurrentModificationException" + }, { "target": "com.amazonaws.iam#EntityAlreadyExistsException" }, @@ -7364,7 +7382,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the account alias associated with the Amazon Web Services account (Note: you can have only\n one). For information about using an Amazon Web Services account alias, see Using an alias for your\n Amazon Web Services account ID in the IAM User Guide.
", + "smithy.api#documentation": "Lists the account alias associated with the Amazon Web Services account (Note: you can have only\n one). For information about using an Amazon Web Services account alias, see Creating,\n deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In\n User Guide.
", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -9083,7 +9101,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the IAM roles that have the specified path prefix. If there are none, the\n operation returns an empty list. For more information about roles, see Working with\n roles.
\nIAM resource-listing operations return a subset of the available \n attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a role, see GetRole.
\nYou can paginate the results using the MaxItems
and Marker
\n parameters.
Lists the IAM roles that have the specified path prefix. If there are none, the\n operation returns an empty list. For more information about roles, see Working with\n roles.
\nIAM resource-listing operations return a subset of the available \n attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:
\nPermissionsBoundary
\nRoleLastUsed
\nTags
\nTo view all of the information for a role, see GetRole.
\nYou can paginate the results using the MaxItems
and Marker
\n parameters.
Lists the IAM users that have the specified path prefix. If no path prefix is\n specified, the operation returns all users in the Amazon Web Services account. If there are none, the\n operation returns an empty list.
\nIAM resource-listing operations return a subset of the available \n attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a user, see GetUser.
\nYou can paginate the results using the MaxItems
and Marker
\n parameters.
Lists the IAM users that have the specified path prefix. If no path prefix is\n specified, the operation returns all users in the Amazon Web Services account. If there are none, the\n operation returns an empty list.
\nIAM resource-listing operations return a subset of the available \n attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:
\nPermissionsBoundary
\nTags
\nTo view all of the information for a user, see GetUser.
\nYou can paginate the results using the MaxItems
and Marker
\n parameters.
The base32 seed defined as specified in RFC3548. The Base32StringSeed
is base64-encoded.
The base32 seed defined as specified in RFC3548. The Base32StringSeed
is base32-encoded.
Creates a unique customer managed KMS key in your Amazon Web Services account and Region.\n You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services\n services let you use KMS keys that you create and manage to protect your service\n resources.
\nA KMS key is a logical representation of a cryptographic key. In addition to the key\n material used in cryptographic operations, a KMS key includes metadata, such as the key ID,\n key policy, creation date, description, and key state. For details, see Managing keys in the\n Key Management Service Developer Guide\n
\nUse the parameters of CreateKey
to specify the type of KMS key, the source of\n its key material, its key policy, description, tags, and other properties.
KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.
\nTo create different types of KMS keys, use the following guidance:
\nBy default, CreateKey
creates a symmetric encryption KMS key with key\n material that KMS generates. This is the basic and most widely used type of KMS key, and\n provides the best performance.
To create a symmetric encryption KMS key, you don't need to specify any parameters.\n The default value for KeySpec
, SYMMETRIC_DEFAULT
, the default\n value for KeyUsage
, ENCRYPT_DECRYPT
, and the default value for\n Origin
, AWS_KMS
, create a symmetric encryption KMS key with\n KMS key material.
If you need a key for basic encryption and decryption or you are creating a KMS key\n to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key.\n The key material in a symmetric encryption key never leaves KMS unencrypted. You can\n use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but\n they are typically used to generate data keys and data keys pairs. For details, see\n GenerateDataKey and GenerateDataKeyPair.
\n\n
To create an asymmetric KMS key, use the KeySpec
parameter to specify\n the type of key material in the KMS key. Then, use the KeyUsage
parameter\n to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.\n You can't change these properties after the KMS key is created.
Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric \n KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key\n so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). \n KMS keys with ECC key pairs can be used only to sign and verify messages. \n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
\n\n
To create an HMAC KMS key, set the KeySpec
parameter to a key spec\n value for HMAC KMS keys. Then set the KeyUsage
parameter to\n GENERATE_VERIFY_MAC
. You must set the key usage even though\n GENERATE_VERIFY_MAC
is the only valid key usage value for HMAC KMS keys.\n You can't change these properties after the KMS key is created.
HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use\n HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.
\nHMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC\n KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the\n CreateKey
operation returns an\n UnsupportedOperationException
. For a list of Regions in which HMAC KMS keys\n are supported, see HMAC keys in\n KMS in the Key Management Service Developer Guide.
\n
To create a multi-Region primary key in the local Amazon Web Services Region,\n use the MultiRegion
parameter with a value of True
. To create\n a multi-Region replica key, that is, a KMS key with the same key ID\n and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its\n primary key to a replica key, use the UpdatePrimaryRegion\n operation.
You can create multi-Region KMS keys for all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't create multi-Region keys in a custom key store.
\nThis operation supports multi-Region keys, an KMS feature that lets you create multiple\n interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key\n material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt\n it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
\n\n
To import your own key material into a KMS key, begin by creating a symmetric\n encryption KMS key with no key material. To do this, use the Origin
\n parameter of CreateKey
with a value of EXTERNAL
. Next, use\n GetParametersForImport operation to get a public key and import\n token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For\n step-by-step instructions, see Importing Key Material in the \n Key Management Service Developer Guide\n .
This feature supports only symmetric encryption KMS keys, including multi-Region\n symmetric encryption KMS keys. You cannot import key material into any other type of KMS\n key.
\nTo create a multi-Region primary key with imported key material, use the\n Origin
parameter of CreateKey
with a value of\n EXTERNAL
and the MultiRegion
parameter with a value of\n True
. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into\n multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
\n
A custom key store lets you protect your Amazon Web Services resources using keys in a backing key\n store that you own and manage. When you request a cryptographic operation with a KMS key\n in a custom key store, the operation is performed in the backing key store using its\n cryptographic keys.
\nKMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an\n external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store,\n KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS\n key. When you create a KMS key in an external key store, you specify an existing\n encryption key in the external key manager.
\nSome external key managers provide a simpler method for creating a KMS key in an\n external key store. For details, see your external key manager documentation.
\nBefore you create a KMS key in a custom key store, the ConnectionState
\n of the key store must be CONNECTED
. To connect the custom key store, use\n the ConnectCustomKeyStore operation. To find the\n ConnectionState
, use the DescribeCustomKeyStores\n operation.
To create a KMS key in a custom key store, use the CustomKeyStoreId
.\n Use the default KeySpec
value, SYMMETRIC_DEFAULT
, and the\n default KeyUsage
value, ENCRYPT_DECRYPT
to create a symmetric\n encryption key. No other key type is supported in a custom key store.
To create a KMS key in an CloudHSM key store, use the\n Origin
parameter with a value of AWS_CLOUDHSM
. The CloudHSM\n cluster that is associated with the custom key store must have at least two active HSMs\n in different Availability Zones in the Amazon Web Services Region.
To create a KMS key in an external key store, use the Origin
parameter\n with a value of EXTERNAL_KEY_STORE
and an XksKeyId
parameter\n that identifies an existing external key.
Some external key managers provide a simpler method for creating a KMS key in an\n external key store. For details, see your external key manager documentation.
\n\n Cross-account use: No. You cannot use this operation to\n create a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:CreateKey (IAM policy). To use the\n Tags
parameter, kms:TagResource (IAM policy). For examples and information about related\n permissions, see Allow a user to create\n KMS keys in the Key Management Service Developer Guide.
\n Related operations:\n
\n\n DescribeKey\n
\n\n ListKeys\n
\n\n ScheduleKeyDeletion\n
\nCreates a unique customer managed KMS key in your Amazon Web Services account and Region.\n You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services\n services let you use KMS keys that you create and manage to protect your service\n resources.
\nA KMS key is a logical representation of a cryptographic key. In addition to the key\n material used in cryptographic operations, a KMS key includes metadata, such as the key ID,\n key policy, creation date, description, and key state. For details, see Managing keys in the\n Key Management Service Developer Guide\n
\nUse the parameters of CreateKey
to specify the type of KMS key, the source of\n its key material, its key policy, description, tags, and other properties.
KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.
\nTo create different types of KMS keys, use the following guidance:
\nBy default, CreateKey
creates a symmetric encryption KMS key with key\n material that KMS generates. This is the basic and most widely used type of KMS key, and\n provides the best performance.
To create a symmetric encryption KMS key, you don't need to specify any parameters.\n The default value for KeySpec
, SYMMETRIC_DEFAULT
, the default\n value for KeyUsage
, ENCRYPT_DECRYPT
, and the default value for\n Origin
, AWS_KMS
, create a symmetric encryption KMS key with\n KMS key material.
If you need a key for basic encryption and decryption or you are creating a KMS key\n to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key.\n The key material in a symmetric encryption key never leaves KMS unencrypted. You can\n use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but\n they are typically used to generate data keys and data keys pairs. For details, see\n GenerateDataKey and GenerateDataKeyPair.
\n\n
To create an asymmetric KMS key, use the KeySpec
parameter to specify\n the type of key material in the KMS key. Then, use the KeyUsage
parameter\n to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.\n You can't change these properties after the KMS key is created.
Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric \n KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key\n so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). \n KMS keys with ECC key pairs can be used only to sign and verify messages. \n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
\n\n
To create an HMAC KMS key, set the KeySpec
parameter to a key spec\n value for HMAC KMS keys. Then set the KeyUsage
parameter to\n GENERATE_VERIFY_MAC
. You must set the key usage even though\n GENERATE_VERIFY_MAC
is the only valid key usage value for HMAC KMS keys.\n You can't change these properties after the KMS key is created.
HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use\n HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.
\n\n
To create a multi-Region primary key in the local Amazon Web Services Region,\n use the MultiRegion
parameter with a value of True
. To create\n a multi-Region replica key, that is, a KMS key with the same key ID\n and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its\n primary key to a replica key, use the UpdatePrimaryRegion\n operation.
You can create multi-Region KMS keys for all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't create multi-Region keys in a custom key store.
\nThis operation supports multi-Region keys, an KMS feature that lets you create multiple\n interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key\n material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt\n it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
\n\n
To import your own key material into a KMS key, begin by creating a KMS key with no\n key material. To do this, use the Origin
parameter of\n CreateKey
with a value of EXTERNAL
. Next, use GetParametersForImport operation to get a public key and import token. Use\n the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see\n Importing Key Material in the \n Key Management Service Developer Guide\n .
You can import key material into KMS keys of all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't import key material into a KMS key in a custom key store.
\nTo create a multi-Region primary key with imported key material, use the\n Origin
parameter of CreateKey
with a value of\n EXTERNAL
and the MultiRegion
parameter with a value of\n True
. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into\n multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
\n
A custom key store lets you protect your Amazon Web Services resources using keys in a backing key\n store that you own and manage. When you request a cryptographic operation with a KMS key\n in a custom key store, the operation is performed in the backing key store using its\n cryptographic keys.
\nKMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an\n external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store,\n KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS\n key. When you create a KMS key in an external key store, you specify an existing\n encryption key in the external key manager.
\nSome external key managers provide a simpler method for creating a KMS key in an\n external key store. For details, see your external key manager documentation.
\nBefore you create a KMS key in a custom key store, the ConnectionState
\n of the key store must be CONNECTED
. To connect the custom key store, use\n the ConnectCustomKeyStore operation. To find the\n ConnectionState
, use the DescribeCustomKeyStores\n operation.
To create a KMS key in a custom key store, use the CustomKeyStoreId
.\n Use the default KeySpec
value, SYMMETRIC_DEFAULT
, and the\n default KeyUsage
value, ENCRYPT_DECRYPT
to create a symmetric\n encryption key. No other key type is supported in a custom key store.
To create a KMS key in an CloudHSM key store, use the\n Origin
parameter with a value of AWS_CLOUDHSM
. The CloudHSM\n cluster that is associated with the custom key store must have at least two active HSMs\n in different Availability Zones in the Amazon Web Services Region.
To create a KMS key in an external key store, use the Origin
parameter\n with a value of EXTERNAL_KEY_STORE
and an XksKeyId
parameter\n that identifies an existing external key.
Some external key managers provide a simpler method for creating a KMS key in an\n external key store. For details, see your external key manager documentation.
\n\n Cross-account use: No. You cannot use this operation to\n create a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:CreateKey (IAM policy). To use the\n Tags
parameter, kms:TagResource (IAM policy). For examples and information about related\n permissions, see Allow a user to create\n KMS keys in the Key Management Service Developer Guide.
\n Related operations:\n
\n\n DescribeKey\n
\n\n ListKeys\n
\n\n ScheduleKeyDeletion\n
\nDeletes key material that you previously imported. This operation makes the specified KMS\n key unusable. For more information about importing key material into KMS, see Importing Key Material\n in the Key Management Service Developer Guide.
\nWhen the specified KMS key is in the PendingDeletion
state, this operation\n does not change the KMS key's state. Otherwise, it changes the KMS key's state to\n PendingImport
.
After you delete key material, you can use ImportKeyMaterial to reimport\n the same key material into the KMS key.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:DeleteImportedKeyMaterial (key policy)
\n\n Related operations:\n
\n\n ImportKeyMaterial\n
\nDeletes key material that was previously imported. This operation makes the specified KMS\n key temporarily unusable. To restore the usability of the KMS key, reimport the same key\n material. For more information about importing key material into KMS, see Importing Key Material\n in the Key Management Service Developer Guide.
\nWhen the specified KMS key is in the PendingDeletion
state, this operation\n does not change the KMS key's state. Otherwise, it changes the KMS key's state to\n PendingImport
.
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:DeleteImportedKeyMaterial (key policy)
\n\n Related operations:\n
\n\n ImportKeyMaterial\n
\nReturns the items you need to import key material into a symmetric encryption KMS key. For\n more information about importing key material into KMS, see Importing key material in the\n Key Management Service Developer Guide.
\nThis operation returns a public key and an import token. Use the public key to encrypt the\n symmetric key material. Store the import token to send with a subsequent ImportKeyMaterial request.
\nYou must specify the key ID of the symmetric encryption KMS key into which you will import\n key material. The KMS key Origin
must be EXTERNAL
. You must also\n specify the wrapping algorithm and type of wrapping key (public key) that you will use to\n encrypt the key material. You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account.
To import key material, you must use the public key and import token from the same\n response. These items are valid for 24 hours. The expiration date and time appear in the\n GetParametersForImport
response. You cannot use an expired token in an ImportKeyMaterial request. If your key and token expire, send another\n GetParametersForImport
request.
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:GetParametersForImport (key policy)
\n\n Related operations:\n
\n\n ImportKeyMaterial\n
\nReturns the public key and an import token you need to import or reimport key material for\n a KMS key.
\nBy default, KMS keys are created with key material that KMS generates. This operation\n supports Importing key\n material, an advanced feature that lets you generate and import the cryptographic\n key material for a KMS key. For more information about importing key material into KMS, see\n Importing key\n material in the Key Management Service Developer Guide.
\nBefore calling GetParametersForImport
, use the CreateKey\n operation with an Origin
value of EXTERNAL
to create a KMS key with\n no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key,\n asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material\n into a multi-Region key of\n any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use\n GetParametersForImport
to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was\n deleted.
\n GetParametersForImport
returns the items that you need to import your key\n material.
The public key (or \"wrapping key\") of an RSA key pair that KMS generates.
\nYou will use this public key to encrypt (\"wrap\") your key material while it's in\n transit to KMS.
\nAn import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key.
\nThe public key and its import token are permanently linked and must be used together. Each\n public key and import token set is valid for 24 hours. The expiration date and time appear in\n the ParametersValidTo
field in the GetParametersForImport
response.\n You cannot use an expired public key or import token in an ImportKeyMaterial\n request. If your key and token expire, send another GetParametersForImport
\n request.
\n GetParametersForImport
requires the following information:
The key ID of the KMS key for which you are importing the key material.
\nThe key spec of the public key (\"wrapping key\") that you will use to encrypt your key\n material during import.
\nThe wrapping algorithm that you will use with the public key to encrypt your key\n material.
\nYou can use the same or a different public key spec and wrapping algorithm each time you\n import or reimport the same key material.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:GetParametersForImport (key policy)
\n\n Related operations:\n
\n\n ImportKeyMaterial\n
\nThe identifier of the symmetric encryption KMS key into which you will import key\n material. The Origin
of the KMS key must be EXTERNAL
.
Specify the key ID or key ARN of the KMS key.
\nFor example:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
", + "smithy.api#documentation": "The identifier of the KMS key that will be associated with the imported key material. The\n Origin
of the KMS key must be EXTERNAL
.
All KMS key types are supported, including multi-Region keys. However, you cannot import\n key material into a KMS key in a custom key store.
\nSpecify the key ID or key ARN of the KMS key.
\nFor example:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
", "smithy.api#required": {} } }, "WrappingAlgorithm": { "target": "com.amazonaws.kms#AlgorithmSpec", "traits": { - "smithy.api#documentation": "The algorithm you will use to encrypt the key material before using the ImportKeyMaterial operation to import it. For more information, see Encrypt the\n key material in the Key Management Service Developer Guide.
\nThe RSAES_PKCS1_V1_5
wrapping algorithm is deprecated. We recommend that\n you begin using a different wrapping algorithm immediately. KMS will end support for\n RSAES_PKCS1_V1_5
by October 1, 2023 pursuant to cryptographic key management guidance from the National Institute of Standards\n and Technology (NIST).
The algorithm you will use with the RSA public key (PublicKey
) in the\n response to protect your key material during import. For more information, see Select a wrapping algorithm in the Key Management Service Developer Guide.
For RSA_AES wrapping algorithms, you encrypt your key material with an AES key that you\n generate, then encrypt your AES key with the RSA public key from KMS. For RSAES wrapping\n algorithms, you encrypt your key material directly with the RSA public key from KMS.
\nThe wrapping algorithms that you can use depend on the type of key material that you are\n importing. To import an RSA private key, you must use an RSA_AES wrapping algorithm.
\n\n RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key\n material.
\n\n RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material.
\n\n RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA key material (private key).
\nYou cannot use the RSAES_OAEP_SHA_256 wrapping algorithm with the RSA_2048 wrapping key spec to wrap \n ECC_NIST_P521 key material.
\n\n RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key material (private\n key).
\nYou cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 wrapping key spec to wrap \n ECC_NIST_P521 key material.
\n\n RSAES_PKCS1_V1_5 (Deprecated) — Supported only for symmetric encryption key\n material (and only in legacy mode).
\nThe type of wrapping key (public key) to return in the response. Only 2048-bit RSA public\n keys are supported.
", + "smithy.api#documentation": "The type of RSA public key to return in the response. You will use this wrapping key with\n the specified wrapping algorithm to protect your key material during import.
\nUse the longest RSA wrapping key that is practical.
\nYou cannot use an RSA_2048 public key to directly wrap an ECC_NIST_P521 private key.\n Instead, use an RSA_AES wrapping algorithm or choose a longer RSA public key.
", "smithy.api#required": {} } } @@ -3409,7 +3421,7 @@ } ], "traits": { - "smithy.api#documentation": "Imports key material into an existing symmetric encryption KMS key that was created\n without key material. After you successfully import key material into a KMS key, you can\n reimport the same key material into that KMS key, but you cannot import different\n key material.
\nYou cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account. For more information about creating KMS keys with no key material\n and then importing key material, see Importing Key Material in the\n Key Management Service Developer Guide.
\nBefore using this operation, call GetParametersForImport. Its response\n includes a public key and an import token. Use the public key to encrypt the key material.\n Then, submit the import token from the same GetParametersForImport
\n response.
When calling this operation, you must specify the following values:
\nThe key ID or key ARN of a KMS key with no key material. Its Origin
must\n be EXTERNAL
.
To create a KMS key with no key material, call CreateKey and set the\n value of its Origin
parameter to EXTERNAL
. To get the\n Origin
of a KMS key, call DescribeKey.)
The encrypted key material. To get the public key to encrypt the key material, call\n GetParametersForImport.
\nThe import token that GetParametersForImport returned. You must use\n a public key and token from the same GetParametersForImport
response.
Whether the key material expires (ExpirationModel
) and, if so, when\n (ValidTo
). If you set an expiration date, on the specified date, KMS\n deletes the key material from the KMS key, making the KMS key unusable. To use the KMS key\n in cryptographic operations again, you must reimport the same key material. The only way\n to change the expiration model or expiration date is by reimporting the same key material\n and specifying a new expiration date.
When this operation is successful, the key state of the KMS key changes from\n PendingImport
to Enabled
, and you can use the KMS key.
If this operation fails, use the exception to help determine the problem. If the error is\n related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key\n and repeat the import procedure. For help, see How To Import Key\n Material in the Key Management Service Developer Guide.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:ImportKeyMaterial (key policy)
\n\n Related operations:\n
\nImports or reimports key material into an existing KMS key that was created without key\n material. ImportKeyMaterial
also sets the expiration model and expiration date of\n the imported key material.
By default, KMS keys are created with key material that KMS generates. This operation\n supports Importing key\n material, an advanced feature that lets you generate and import the cryptographic\n key material for a KMS key. For more information about importing key material into KMS, see\n Importing key\n material in the Key Management Service Developer Guide.
\nAfter you successfully import key material into a KMS key, you can reimport\n the same key material into that KMS key, but you cannot import different key\n material. You might reimport key material to replace key material that expired or key material\n that you deleted. You might also reimport key material to change the expiration model or\n expiration date of the key material. Before reimporting key material, if necessary, call DeleteImportedKeyMaterial to delete the current imported key material.
\nEach time you import key material into KMS, you can determine whether\n (ExpirationModel
) and when (ValidTo
) the key material expires. To\n change the expiration of your key material, you must import it again, either by calling\n ImportKeyMaterial
or using the import features of the\n KMS console.
Before calling ImportKeyMaterial
:
Create or identify a KMS key with no key material. The KMS key must have an\n Origin
value of EXTERNAL
, which indicates that the KMS key is\n designed for imported key material.
To create a new KMS key for imported key material, call the CreateKey operation with an Origin
value of EXTERNAL
. You can create a\n symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric\n signing KMS key. You can also import key material into a multi-Region key of any\n supported type. However, you can't import key material into a KMS key in a custom key store.
Use the DescribeKey operation to verify that the\n KeyState
of the KMS key is PendingImport
, which indicates that\n the KMS key has no key material.
If you are reimporting the same key material into an existing KMS key, you might need\n to call the DeleteImportedKeyMaterial to delete its existing key\n material.
\nCall the GetParametersForImport operation to get a public key and\n import token set for importing key material.
\nUse the public key in the GetParametersForImport response to encrypt\n your key material.
\n Then, in an ImportKeyMaterial
request, you submit your encrypted key\n material and import token. When calling this operation, you must specify the following\n values:
The key ID or key ARN of the KMS key to associate with the imported key material. Its\n Origin
must be EXTERNAL
and its KeyState
must be\n PendingImport
. You cannot perform this operation on a KMS key in a custom key store, or on a KMS\n key in a different Amazon Web Services account. To get the Origin
and KeyState
\n of a KMS key, call DescribeKey.
The encrypted key material.
\nThe import token that GetParametersForImport returned. You must use\n a public key and token from the same GetParametersForImport
response.
Whether the key material expires (ExpirationModel
) and, if so, when\n (ValidTo
). For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.
If you set an expiration date, KMS deletes the key material from the KMS key on the\n specified date, making the KMS key unusable. To use the KMS key in cryptographic\n operations again, you must reimport the same key material. However, you can delete and\n reimport the key material at any time, including before the key material expires. Each\n time you reimport, you can eliminate or reset the expiration time.
\nWhen this operation is successful, the key state of the KMS key changes from\n PendingImport
to Enabled
, and you can use the KMS key in\n cryptographic operations.
If this operation fails, use the exception to help determine the problem. If the error is\n related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key\n and repeat the import procedure. For help, see How To Import Key\n Material in the Key Management Service Developer Guide.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:ImportKeyMaterial (key policy)
\n\n Related operations:\n
\nThe identifier of the symmetric encryption KMS key that receives the imported key\n material. This must be the same KMS key specified in the KeyID
parameter of the\n corresponding GetParametersForImport request. The Origin
of the\n KMS key must be EXTERNAL
. You cannot perform this operation on an asymmetric KMS\n key, an HMAC KMS key, a KMS key in a custom key store, or on a KMS key in a different\n Amazon Web Services account
Specify the key ID or key ARN of the KMS key.
\nFor example:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
", + "smithy.api#documentation": "The identifier of the KMS key that will be associated with the imported key material. This\n must be the same KMS key specified in the KeyID
parameter of the corresponding\n GetParametersForImport request. The Origin
of the KMS key\n must be EXTERNAL
and its KeyState
must be\n PendingImport
.
The KMS key can be a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS\n key, or asymmetric signing KMS key, including a multi-Region key of any supported\n type. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in\n a different Amazon Web Services account.
\nSpecify the key ID or key ARN of the KMS key.
\nFor example:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
", "smithy.api#required": {} } }, @@ -3432,7 +3444,7 @@ "EncryptedKeyMaterial": { "target": "com.amazonaws.kms#CiphertextType", "traits": { - "smithy.api#documentation": "The encrypted key material to import. The key material must be encrypted with the public\n wrapping key that GetParametersForImport returned, using the wrapping\n algorithm that you specified in the same GetParametersForImport
request.
The encrypted key material to import. The key material must be encrypted under the public\n wrapping key that GetParametersForImport returned, using the wrapping\n algorithm that you specified in the same GetParametersForImport
request.
Specifies whether the key material expires. The default is\n KEY_MATERIAL_EXPIRES
.
When the value of ExpirationModel
is KEY_MATERIAL_EXPIRES
, you\n must specify a value for the ValidTo
parameter. When value is\n KEY_MATERIAL_DOES_NOT_EXPIRE
, you must omit the ValidTo
\n parameter.
You cannot change the ExpirationModel
or ValidTo
values for the\n current import after the request completes. To change either value, you must delete (DeleteImportedKeyMaterial) and reimport the key material.
Specifies whether the key material expires. The default is\n KEY_MATERIAL_EXPIRES
. For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.
When the value of ExpirationModel
is KEY_MATERIAL_EXPIRES
, you\n must specify a value for the ValidTo
parameter. When value is\n KEY_MATERIAL_DOES_NOT_EXPIRE
, you must omit the ValidTo
\n parameter.
You cannot change the ExpirationModel
or ValidTo
values for the\n current import after the request completes. To change either value, you must reimport the key\n material.
Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30\n days, but you can specify a waiting period of 7-30 days. When this operation is successful,\n the key state of the KMS key changes to PendingDeletion
and the key can't be used\n in any cryptographic operations. It remains in this state for the duration of the waiting\n period. Before the waiting period ends, you can use CancelKeyDeletion to\n cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key,\n its key material, and all KMS data associated with it, including all aliases that refer to\n it.
Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key\n is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only\n exception is a multi-Region replica key.) To prevent the use of a KMS key without deleting\n it, use DisableKey.
\nYou can schedule the deletion of a multi-Region primary key and its replica keys at any\n time. However, KMS will not delete a multi-Region primary key with existing replica keys. If\n you schedule the deletion of a primary key with replicas, its key state changes to\n PendingReplicaDeletion
and it cannot be replicated or used in cryptographic\n operations. This status can continue indefinitely. When the last of its replicas keys is\n deleted (not just scheduled), the key state of the primary key changes to\n PendingDeletion
and its waiting period (PendingWindowInDays
)\n begins. For details, see Deleting multi-Region keys in the\n Key Management Service Developer Guide.
When KMS deletes\n a KMS key from an CloudHSM key store, it makes a best effort to delete the associated\n key material from the associated CloudHSM cluster. However, you might need to manually delete\n the orphaned key material from the cluster and its backups. Deleting a KMS key from an\n external key store has no effect on the associated external key. However, for both\n types of custom key stores, deleting a KMS key is destructive and irreversible. You cannot\n decrypt ciphertext encrypted under the KMS key by using only its associated external key or\n CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a new KMS\n key with the same key material.
\nFor more information about scheduling a KMS key for deletion, see Deleting KMS keys in the\n Key Management Service Developer Guide.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:ScheduleKeyDeletion (key\n policy)
\n\n Related operations\n
\n\n CancelKeyDeletion\n
\n\n DisableKey\n
\nSchedules the deletion of a KMS key. By default, KMS applies a waiting period of 30\n days, but you can specify a waiting period of 7-30 days. When this operation is successful,\n the key state of the KMS key changes to PendingDeletion
and the key can't be used\n in any cryptographic operations. It remains in this state for the duration of the waiting\n period. Before the waiting period ends, you can use CancelKeyDeletion to\n cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key,\n its key material, and all KMS data associated with it, including all aliases that refer to\n it.
Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key\n is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only\n exception is a multi-Region replica\n key, or an asymmetric or HMAC KMS key with imported key material[BUGBUG-link to\n importing-keys-managing.html#import-delete-key.) To prevent the use of a KMS key without\n deleting it, use DisableKey.
\nYou can schedule the deletion of a multi-Region primary key and its replica keys at any\n time. However, KMS will not delete a multi-Region primary key with existing replica keys. If\n you schedule the deletion of a primary key with replicas, its key state changes to\n PendingReplicaDeletion
and it cannot be replicated or used in cryptographic\n operations. This status can continue indefinitely. When the last of its replicas keys is\n deleted (not just scheduled), the key state of the primary key changes to\n PendingDeletion
and its waiting period (PendingWindowInDays
)\n begins. For details, see Deleting multi-Region keys in the\n Key Management Service Developer Guide.
When KMS deletes\n a KMS key from an CloudHSM key store, it makes a best effort to delete the associated\n key material from the associated CloudHSM cluster. However, you might need to manually delete\n the orphaned key material from the cluster and its backups. Deleting a KMS key from an\n external key store has no effect on the associated external key. However, for both\n types of custom key stores, deleting a KMS key is destructive and irreversible. You cannot\n decrypt ciphertext encrypted under the KMS key by using only its associated external key or\n CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a new KMS\n key with the same key material.
\nFor more information about scheduling a KMS key for deletion, see Deleting KMS keys in the\n Key Management Service Developer Guide.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n Required permissions: kms:ScheduleKeyDeletion (key\n policy)
\n\n Related operations\n
\n\n CancelKeyDeletion\n
\n\n DisableKey\n
\nThe waiting period, specified in number of days. After the waiting period ends, KMS\n deletes the KMS key.
\nIf the KMS key is a multi-Region primary key with replica keys, the waiting period begins\n when the last of its replica keys is deleted. Otherwise, the waiting period begins\n immediately.
\nThis value is optional. If you include a value, it must be between 7 and 30, inclusive. If\n you do not include a value, it defaults to 30.
" + "smithy.api#documentation": "The waiting period, specified in number of days. After the waiting period ends, KMS\n deletes the KMS key.
\nIf the KMS key is a multi-Region primary key with replica keys, the waiting period begins\n when the last of its replica keys is deleted. Otherwise, the waiting period begins\n immediately.
\nThis value is optional. If you include a value, it must be between 7 and 30, inclusive. If\n you do not include a value, it defaults to 30. You can use the \n kms:ScheduleKeyDeletionPendingWindowInDays
\n \n condition key to further constrain the values that principals can specify in the \n PendingWindowInDays
parameter.
The cryptographic signature that was generated for the message.
\nWhen used with the supported RSA signing algorithms, the encoding of this value is\n defined by PKCS #1 in RFC\n 8017.
\nWhen used with the ECDSA_SHA_256
, ECDSA_SHA_384
, or\n ECDSA_SHA_512
signing algorithms, this value is a DER-encoded object as\n defined by ANS X9.62–2005 and RFC 3279 Section 2.2.3.\n This is the most commonly used signature format and is appropriate for most uses.\n
When you use the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.
" + "smithy.api#documentation": "The cryptographic signature that was generated for the message.
\nWhen used with the supported RSA signing algorithms, the encoding of this value is\n defined by PKCS #1 in RFC\n 8017.
\nWhen used with the ECDSA_SHA_256
, ECDSA_SHA_384
, or\n ECDSA_SHA_512
signing algorithms, this value is a DER-encoded object as\n defined by ANSI X9.62–2005 and RFC 3279 Section 2.2.3.\n This is the most commonly used signature format and is appropriate for most uses.\n
When you use the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.
" } }, "SigningAlgorithm": { @@ -7827,6 +7839,18 @@ "traits": { "smithy.api#enumValue": "RSA_2048" } + }, + "RSA_3072": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RSA_3072" + } + }, + "RSA_4096": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RSA_4096" + } } } }, diff --git a/aws/sdk/aws-models/lambda.json b/aws/sdk/aws-models/lambda.json index b7c3fb34ca0268e3d62ae8f601a5edd50874e4ae..df1e4216396295c44edde192633ebd9d73be1f99 100644 --- a/aws/sdk/aws-models/lambda.json +++ b/aws/sdk/aws-models/lambda.json @@ -2799,7 +2799,7 @@ "KMSKeyArn": { "target": "com.amazonaws.lambda#KMSKeyArn", "traits": { - "smithy.api#documentation": "The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, this key is also used to encrypt your function's snapshot. If you don't provide a customer managed key, Lambda uses a default service key.
" + "smithy.api#documentation": "The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's \nenvironment variables. When \nLambda SnapStart is activated, Lambda also uses \nthis key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to \nencrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR).\nIf you don't provide a customer managed key, Lambda uses a default service key.
" } }, "TracingConfig": { @@ -4014,7 +4014,7 @@ "MaximumRecordAgeInSeconds": { "target": "com.amazonaws.lambda#MaximumRecordAgeInSeconds", "traits": { - "smithy.api#documentation": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1,\nwhich sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.
\nThe minimum value that can be set is 60 seconds.
\n(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1,\nwhich sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.
\nThe minimum valid value for maximum record age is 60s. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed
\nThe layer's compatible runtimes.
" + "smithy.api#documentation": "The layer's compatible runtimes.
\nThe following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
" } }, "LicenseInfo": { @@ -7270,7 +7270,7 @@ "CompatibleRuntimes": { "target": "com.amazonaws.lambda#CompatibleRuntimes", "traits": { - "smithy.api#documentation": "The layer's compatible runtimes.
" + "smithy.api#documentation": "The layer's compatible runtimes.
\nThe following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
" } }, "LicenseInfo": { @@ -7990,7 +7990,7 @@ "CompatibleRuntime": { "target": "com.amazonaws.lambda#Runtime", "traits": { - "smithy.api#documentation": "A runtime identifier. For example, go1.x
.
A runtime identifier. For example, go1.x
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
", "smithy.api#httpQuery": "CompatibleRuntime" } }, @@ -8088,7 +8088,7 @@ "CompatibleRuntime": { "target": "com.amazonaws.lambda#Runtime", "traits": { - "smithy.api#documentation": "A runtime identifier. For example, go1.x
.
A runtime identifier. For example, go1.x
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
", "smithy.api#httpQuery": "CompatibleRuntime" } }, @@ -8875,7 +8875,7 @@ "CompatibleRuntimes": { "target": "com.amazonaws.lambda#CompatibleRuntimes", "traits": { - "smithy.api#documentation": "A list of compatible function\n runtimes. Used for filtering with ListLayers and ListLayerVersions.
" + "smithy.api#documentation": "A list of compatible function\n runtimes. Used for filtering with ListLayers and ListLayerVersions.
\nThe following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
" } }, "LicenseInfo": { @@ -8938,7 +8938,7 @@ "CompatibleRuntimes": { "target": "com.amazonaws.lambda#CompatibleRuntimes", "traits": { - "smithy.api#documentation": "The layer's compatible runtimes.
" + "smithy.api#documentation": "The layer's compatible runtimes.
\nThe following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
" } }, "LicenseInfo": { @@ -9942,6 +9942,12 @@ "traits": { "smithy.api#enumValue": "java17" } + }, + "ruby32": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ruby3.2" + } } } }, @@ -11426,7 +11432,7 @@ "KMSKeyArn": { "target": "com.amazonaws.lambda#KMSKeyArn", "traits": { - "smithy.api#documentation": "The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, this key is also used to encrypt your function's snapshot. If you don't provide a customer managed key, Lambda uses a default service key.
" + "smithy.api#documentation": "The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's \nenvironment variables. When \nLambda SnapStart is activated, Lambda also uses \nthis key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to \nencrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR).\nIf you don't provide a customer managed key, Lambda uses a default service key.
" } }, "TracingConfig": { diff --git a/aws/sdk/aws-models/polly.json b/aws/sdk/aws-models/polly.json index 70ddbca064f4a6b8409a5804915bf625295d20f6..219c1a0a927b425d603e53038d6bcca8627fcb22 100644 --- a/aws/sdk/aws-models/polly.json +++ b/aws/sdk/aws-models/polly.json @@ -677,6 +677,12 @@ "traits": { "smithy.api#enumValue": "fi-FI" } + }, + "en_IE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "en-IE" + } } } }, @@ -3371,6 +3377,18 @@ "traits": { "smithy.api#enumValue": "Tomoko" } + }, + "Niamh": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Niamh" + } + }, + "Sofie": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Sofie" + } } } }, diff --git a/aws/sdk/aws-models/s3.json b/aws/sdk/aws-models/s3.json index a08417723f52af8e40c42cf119b2ce7a55226bd8..f0227a4af82df327387802e426feab8a8700eb78 100644 --- a/aws/sdk/aws-models/s3.json +++ b/aws/sdk/aws-models/s3.json @@ -62,6 +62,18 @@ ], "traits": { "smithy.api#documentation": "This action aborts a multipart upload. After a multipart upload is aborted, no\n additional parts can be uploaded using that upload ID. The storage consumed by any\n previously uploaded parts will be freed. However, if any part uploads are currently in\n progress, those part uploads might or might not succeed. As a result, it might be necessary\n to abort a given multipart upload multiple times in order to completely free all storage\n consumed by all parts.
\nTo verify that all parts have been removed, so you don't get charged for the part\n storage, you should call the ListParts action and ensure that\n the parts list is empty.
\nFor information about permissions required to use the multipart upload, see Multipart Upload\n and Permissions.
\nThe following operations are related to AbortMultipartUpload
:
\n UploadPart\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThe server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
).
If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -20053,7 +21541,7 @@ "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Amazon Web Services KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -20335,7 +21823,24 @@ } ], "traits": { - "smithy.api#documentation": "Creates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.
\nAll copy requests must be authenticated. Additionally, you must have\n read access to the source object and write\n access to the destination bucket. For more information, see REST Authentication. Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error. If the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. This means that a 200 OK
\n response can contain either a success or an error. If you call the S3 API directly, make\n sure to design your application to parse the contents of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throws an exception (or, for the SDKs that don't use exceptions, they return the\n error).
If the copy is successful, you receive a response with information about the copied\n object.
\nIf the request is an HTTP 1.1 request, the response is chunk encoded. If it were not,\n it would not contain the content-length, and you would need to read the entire\n body.
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. For pricing information, see Amazon S3 pricing.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer\n Acceleration.
When copying an object, you can preserve all metadata (default) or specify new metadata.\n However, the ACL is not preserved and is set to private for the user making the request. To\n override the default ACL setting, specify a new ACL when generating a copy request. For\n more information, see Using ACLs.
\nTo specify whether you want the object metadata copied from the source object or\n replaced with metadata provided in the request, you can optionally add the\n x-amz-metadata-directive
header. When you grant permissions, you can use\n the s3:x-amz-metadata-directive
condition key to enforce certain metadata\n behavior when objects are uploaded. For more information, see Specifying Conditions in a\n Policy in the Amazon S3 User Guide. For a complete list of\n Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for\n Amazon S3.
\n x-amz-website-redirect-location
is unique to each object and must be\n specified in the request headers to copy the value.
To only copy an object under certain conditions, such as whether the Etag
\n matches or whether the object was modified before or after a specified date, use the\n following request parameters:
\n x-amz-copy-source-if-match
\n
\n x-amz-copy-source-if-none-match
\n
\n x-amz-copy-source-if-unmodified-since
\n
\n x-amz-copy-source-if-modified-since
\n
If both the x-amz-copy-source-if-match
and\n x-amz-copy-source-if-unmodified-since
headers are present in the request\n and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
\n x-amz-copy-source-if-match
condition evaluates to true
\n x-amz-copy-source-if-unmodified-since
condition evaluates to\n false
If both the x-amz-copy-source-if-none-match
and\n x-amz-copy-source-if-modified-since
headers are present in the request and\n evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response\n code:
\n x-amz-copy-source-if-none-match
condition evaluates to false
\n x-amz-copy-source-if-modified-since
condition evaluates to\n true
All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed.
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When\n copying an object, if you don't specify encryption information in your copy request, the\n encryption setting of the target object is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target\n object copy.
\nWhen you perform a CopyObject operation, if you want to use a different type\n of encryption setting for the target object, you can use other appropriate\n encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed\n key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts the data when you access it. If the\n encryption setting in your request is different from the default encryption configuration\n of the destination bucket, the encryption setting in your request takes precedence. If the\n source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary\n encryption information in your request so that Amazon S3 can decrypt the object for copying. For\n more information about server-side encryption, see Using Server-Side\n Encryption.
\nIf a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For\n more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
\nWhen copying an object, you can optionally use headers to grant ACL-based permissions.\n By default, all objects are private. Only the owner has full access control. When adding a\n new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups\n defined by Amazon S3. These permissions are then added to the ACL on the object. For more\n information, see Access Control List (ACL) Overview and Managing ACLs Using the REST\n API.
\nIf the bucket that you're copying objects to uses the bucket owner enforced setting for\n S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use\n this setting only accept PUT requests that don't specify an ACL or PUT requests that\n specify bucket owner full control ACLs, such as the bucket-owner-full-control
\n canned ACL or an equivalent form of this ACL expressed in the XML format.
For more information, see Controlling ownership of\n objects and disabling ACLs in the Amazon S3 User Guide.
\nIf your bucket uses the bucket owner enforced setting for Object Ownership, all\n objects written to the bucket by any account will be owned by the bucket owner.
\nWhen copying an object, if it has a checksum, that checksum will be copied to the new\n object by default. When you copy the object over, you may optionally specify a different\n checksum algorithm to use with the x-amz-checksum-algorithm
header.
You can use the CopyObject
action to change the storage class of an object\n that is already stored in Amazon S3 using the StorageClass
parameter. For more\n information, see Storage Classes in the\n Amazon S3 User Guide.
If the source object's storage class is GLACIER, you must restore a copy of\n this object before you can use it as a source object for the copy operation. For\n more information, see RestoreObject. For\n more information, see Copying\n Objects.
\nBy default, x-amz-copy-source
identifies the current version of an object\n to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was\n deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for\n the object being copied. This version ID is different from the version ID of the source\n object. Amazon S3 returns the version ID of the copied object in the\n x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that\n Amazon S3 generates is always null.
\nThe following operations are related to CopyObject
:
Creates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.
\nAll copy requests must be authenticated. Additionally, you must have\n read access to the source object and write\n access to the destination bucket. For more information, see REST Authentication. Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error. If the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. This means that a 200 OK
\n response can contain either a success or an error. If you call the S3 API directly, make\n sure to design your application to parse the contents of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throws an exception (or, for the SDKs that don't use exceptions, they return the\n error).
If the copy is successful, you receive a response with information about the copied\n object.
\nIf the request is an HTTP 1.1 request, the response is chunk encoded. If it were not,\n it would not contain the content-length, and you would need to read the entire\n body.
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. For pricing information, see Amazon S3 pricing.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer\n Acceleration.
When copying an object, you can preserve all metadata (the default) or specify new metadata.\n However, the access control list (ACL) is not preserved and is set to private for the user making the request. To\n override the default ACL setting, specify a new ACL when generating a copy request. For\n more information, see Using ACLs.
\nTo specify whether you want the object metadata copied from the source object or\n replaced with metadata provided in the request, you can optionally add the\n x-amz-metadata-directive
header. When you grant permissions, you can use\n the s3:x-amz-metadata-directive
condition key to enforce certain metadata\n behavior when objects are uploaded. For more information, see Specifying Conditions in a\n Policy in the Amazon S3 User Guide. For a complete list of\n Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for\n Amazon S3.
\n x-amz-website-redirect-location
is unique to each object and must be\n specified in the request headers to copy the value.
To only copy an object under certain conditions, such as whether the Etag
\n matches or whether the object was modified before or after a specified date, use the\n following request parameters:
\n x-amz-copy-source-if-match
\n
\n x-amz-copy-source-if-none-match
\n
\n x-amz-copy-source-if-unmodified-since
\n
\n x-amz-copy-source-if-modified-since
\n
If both the x-amz-copy-source-if-match
and\n x-amz-copy-source-if-unmodified-since
headers are present in the request\n and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
\n x-amz-copy-source-if-match
condition evaluates to true
\n x-amz-copy-source-if-unmodified-since
condition evaluates to\n false
If both the x-amz-copy-source-if-none-match
and\n x-amz-copy-source-if-modified-since
headers are present in the request and\n evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response\n code:
\n x-amz-copy-source-if-none-match
condition evaluates to false
\n x-amz-copy-source-if-modified-since
condition evaluates to\n true
All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed.
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When\n copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a default encryption\n configuration that uses server-side encryption with Key Management Service (KMS) keys\n (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses\n the corresponding KMS key, or a customer-provided key to encrypt the target\n object copy.
\nWhen you perform a CopyObject
operation, if you want to use a different type\n of encryption setting for the target object, you can use other appropriate\n encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed\n key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it\n writes your data to disks in its data centers and decrypts the data when you access it. If the\n encryption setting in your request is different from the default encryption configuration\n of the destination bucket, the encryption setting in your request takes precedence. If the\n source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary\n encryption information in your request so that Amazon S3 can decrypt the object for copying. For\n more information about server-side encryption, see Using Server-Side\n Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the\n object. For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.
\nWhen copying an object, you can optionally use headers to grant ACL-based permissions.\n By default, all objects are private. Only the owner has full access control. When adding a\n new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups\n that are defined by Amazon S3. These permissions are then added to the ACL on the object. For more\n information, see Access Control List (ACL) Overview and Managing ACLs Using the REST\n API.
\nIf the bucket that you're copying objects to uses the bucket owner enforced setting for\n S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use\n this setting only accept PUT
requests that don't specify an ACL or PUT
requests that\n specify bucket owner full control ACLs, such as the bucket-owner-full-control
\n canned ACL or an equivalent form of this ACL expressed in the XML format.
For more information, see Controlling ownership of\n objects and disabling ACLs in the Amazon S3 User Guide.
\nIf your bucket uses the bucket owner enforced setting for Object Ownership, all\n objects written to the bucket by any account will be owned by the bucket owner.
\nWhen copying an object, if it has a checksum, that checksum will be copied to the new\n object by default. When you copy the object over, you can optionally specify a different\n checksum algorithm to use with the x-amz-checksum-algorithm
header.
You can use the CopyObject
action to change the storage class of an object\n that is already stored in Amazon S3 by using the StorageClass
parameter. For more\n information, see Storage Classes in the\n Amazon S3 User Guide.
If the source object's storage class is GLACIER, you must restore a copy of\n this object before you can use it as a source object for the copy operation. For\n more information, see RestoreObject. For\n more information, see Copying\n Objects.
\nBy default, x-amz-copy-source
header identifies the current version of an object\n to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was\n deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for\n the object being copied. This version ID is different from the version ID of the source\n object. Amazon S3 returns the version ID of the copied object in the\n x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that\n Amazon S3 generates is always null.
\nThe following operations are related to CopyObject
:
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -20413,7 +21918,7 @@ "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether the copied object uses an S3 Bucket Key for server-side encryption\n with Amazon Web Services KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the copied object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -20594,7 +22099,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
Specifies the Amazon Web Services KMS key ID to use for object encryption. All GET and PUT requests\n for an object protected by Amazon Web Services KMS will fail if not made via SSL or using SigV4. For\n information about configuring using any of the officially supported Amazon Web Services SDKs and Amazon Web Services\n CLI, see Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies the KMS key ID to use for object encryption. All GET and PUT requests for an\n object protected by KMS will fail if they're not made via SSL or using SigV4. For\n information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see\n Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -20651,7 +22156,7 @@ "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
\n causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a COPY action doesn’t affect bucket-level settings for S3\n Bucket Key.
", + "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true
causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.
Specifying this header with a COPY action doesn’t affect bucket-level settings for S3\n Bucket Key.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -20870,7 +22375,19 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a\n valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to\n create buckets. By creating the bucket, you become the bucket owner.
\nNot every string is an acceptable bucket name. For information about bucket naming\n restrictions, see Bucket naming\n rules.
\nIf you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
\nBy default, the bucket is created in the US East (N. Virginia) Region. You can\n optionally specify a Region in the request body. You might choose a Region to optimize\n latency, minimize costs, or address regulatory requirements. For example, if you reside in\n Europe, you will probably find it advantageous to create buckets in the Europe (Ireland)\n Region. For more information, see Accessing a\n bucket.
\nIf you send your create bucket request to the s3.amazonaws.com
endpoint,\n the request goes to the us-east-1 Region. Accordingly, the signature calculations in\n Signature Version 4 must use us-east-1 as the Region, even if the location constraint in\n the request specifies another Region where the bucket is to be created. If you create a\n bucket in a Region other than US East (N. Virginia), your application must be able to\n handle 307 redirect. For more information, see Virtual hosting of\n buckets.
When creating a bucket using this operation, you can optionally configure the bucket ACL\n to specify the accounts or groups that should be granted specific permissions on the\n bucket.
\nIf your CreateBucket request sets bucket owner enforced for S3 Object Ownership and\n specifies a bucket ACL that provides access to an external Amazon Web Services account, your request\n fails with a 400
error and returns the\n InvalidBucketAclWithObjectOwnership
error code. For more information,\n see Controlling object\n ownership in the Amazon S3 User Guide.
There are two ways to grant the appropriate permissions using the request\n headers.
\nSpecify a canned ACL using the x-amz-acl
request header. Amazon S3\n supports a set of predefined ACLs, known as canned ACLs. Each\n canned ACL has a predefined set of grantees and permissions. For more information,\n see Canned ACL.
Specify access permissions explicitly using the x-amz-grant-read
,\n x-amz-grant-write
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and x-amz-grant-full-control
\n headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For\n more information, see Access control list (ACL)\n overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an\n Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot\n do both.
\nIn addition to s3:CreateBucket
, the following permissions are required when\n your CreateBucket includes specific headers:
\n ACLs - If your CreateBucket
request\n specifies ACL permissions and the ACL is public-read, public-read-write,\n authenticated-read, or if you specify access permissions explicitly through any other\n ACL, both s3:CreateBucket
and s3:PutBucketAcl
permissions\n are needed. If the ACL the CreateBucket
request is private or doesn't\n specify any ACLs, only s3:CreateBucket
permission is needed.
\n Object Lock - If\n ObjectLockEnabledForBucket
is set to true in your\n CreateBucket
request,\n s3:PutBucketObjectLockConfiguration
and\n s3:PutBucketVersioning
permissions are required.
\n S3 Object Ownership - If your CreateBucket\n request includes the x-amz-object-ownership
header,\n s3:PutBucketOwnershipControls
permission is required.
The following operations are related to CreateBucket
:
\n PutObject\n
\n\n DeleteBucket\n
\nCreates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a\n valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to\n create buckets. By creating the bucket, you become the bucket owner.
\nNot every string is an acceptable bucket name. For information about bucket naming\n restrictions, see Bucket naming\n rules.
\nIf you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
\nBy default, the bucket is created in the US East (N. Virginia) Region. You can\n optionally specify a Region in the request body. You might choose a Region to optimize\n latency, minimize costs, or address regulatory requirements. For example, if you reside in\n Europe, you will probably find it advantageous to create buckets in the Europe (Ireland)\n Region. For more information, see Accessing a\n bucket.
\nIf you send your create bucket request to the s3.amazonaws.com
endpoint,\n the request goes to the us-east-1
Region. Accordingly, the signature calculations in\n Signature Version 4 must use us-east-1
as the Region, even if the location constraint in\n the request specifies another Region where the bucket is to be created. If you create a\n bucket in a Region other than US East (N. Virginia), your application must be able to\n handle 307 redirect. For more information, see Virtual hosting of\n buckets.
In addition to s3:CreateBucket
, the following permissions are required when\n your CreateBucket
request includes specific headers:
\n Access control lists (ACLs) - If your CreateBucket
request\n specifies access control list (ACL) permissions and the ACL is public-read, public-read-write,\n authenticated-read, or if you specify access permissions explicitly through any other\n ACL, both s3:CreateBucket
and s3:PutBucketAcl
permissions\n are needed. If the ACL for the CreateBucket
request is private or if the request doesn't\n specify any ACLs, only s3:CreateBucket
permission is needed.
\n Object Lock - If ObjectLockEnabledForBucket
is set to true in your\n CreateBucket
request,\n s3:PutBucketObjectLockConfiguration
and\n s3:PutBucketVersioning
permissions are required.
\n S3 Object Ownership - If your CreateBucket
request includes the x-amz-object-ownership
header, then the\n s3:PutBucketOwnershipControls
permission is required. By default, ObjectOwnership
is set to BucketOWnerEnforced
and ACLs are disabled. We recommend keeping\n ACLs disabled, except in uncommon use cases where you must control access for each object individually. If you want to change the ObjectOwnership
setting, you can use the \n x-amz-object-ownership
header in your CreateBucket
request to set the ObjectOwnership
setting of your choice.\n For more information about S3 Object Ownership, see Controlling object\n ownership in the Amazon S3 User Guide.
\n S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. You can create a new bucket with Block Public Access enabled, then separately call the \n DeletePublicAccessBlock
\n API. To use this operation, you must have the\n s3:PutBucketPublicAccessBlock
permission. By default, all Block\n Public Access settings are enabled for new buckets. To avoid inadvertent exposure of\n your resources, we recommend keeping the S3 Block Public Access settings enabled. For more information about S3 Block Public Access, see Blocking public\n access to your Amazon S3 storage in the Amazon S3 User Guide.
If your CreateBucket
request sets BucketOwnerEnforced
for Amazon S3 Object Ownership\n and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400
error and returns the InvalidBucketAcLWithObjectOwnership
error code. For more information,\n see Setting Object\n Ownership on an existing bucket in the Amazon S3 User Guide.
The following operations are related to CreateBucket
:
\n PutObject\n
\n\n DeleteBucket\n
\nThis action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.
\nFor more information about multipart uploads, see Multipart Upload Overview.
\nIf you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
\nFor information about the permissions required to use the multipart upload API, see\n Multipart\n Upload and Permissions.
\nFor request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4).
\nAfter you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stop charging you for\n storing them only after you either complete or abort a multipart upload.
\nServer-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. Amazon S3\n automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a\n multipart upload, if you don't specify encryption information in your request, the\n encryption setting of the uploaded parts is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded\n parts. When you perform a CreateMultipartUpload operation, if you want to use a different\n type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the\n object with a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption\n setting in your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If you choose\n to provide your own encryption key, the request headers you provide in UploadPart\n and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload
. You can request that Amazon S3\n save the uploaded parts encrypted with server-side encryption with an Amazon S3 managed key\n (SSE-S3), a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key\n (SSE-C).
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt
and kms:GenerateDataKey*
\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key,\n then you must have these permissions on the key policy. If your IAM user or role belongs\n to a different account than the key, then you must have the permissions on both the key\n policy and your IAM user or role.
\nFor more information, see Protecting Data Using Server-Side\n Encryption.
\nWhen copying an object, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:
\nSpecify a canned ACL with the x-amz-acl
request header. For\n more information, see Canned\n ACL.
Specify access permissions explicitly with the\n x-amz-grant-read
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. These parameters map to\n the set of permissions that Amazon S3 supports in an ACL. For more information,\n see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
\nAmazon S3 encrypts data\n by using server-side encryption with an Amazon S3 managed key (SSE-S3) by default. Server-side encryption is for data encryption at rest. Amazon S3 encrypts\n your data as it writes it to disks in its data centers and decrypts it when you\n access it. You can request that Amazon S3 encrypts\n data at rest by using server-side encryption with other key options. The option you use depends on\n whether you want to use KMS keys (SSE-KMS) or provide your own encryption keys\n (SSE-C).
\nUse KMS keys (SSE-KMS) that include the Amazon Web Services managed key\n (aws/s3
) and KMS customer managed keys stored in Key Management Service (KMS) – If you\n want Amazon Web Services to manage the keys used to encrypt data, specify the following\n headers in the request.
\n x-amz-server-side-encryption
\n
\n x-amz-server-side-encryption-aws-kms-key-id
\n
\n x-amz-server-side-encryption-context
\n
If you specify x-amz-server-side-encryption:aws:kms
, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id
,\n Amazon S3 uses the Amazon Web Services managed key (aws/s3
key) in KMS to\n protect the data.
All GET
and PUT
requests for an object protected\n by KMS fail if you don't make them by using Secure Sockets Layer (SSL),\n Transport Layer Security (TLS), or Signature Version 4.
For more information about server-side encryption with KMS keys\n (SSE-KMS), see Protecting Data\n Using Server-Side Encryption with KMS keys.
\nUse customer-provided encryption keys (SSE-C) – If you want to manage\n your own encryption keys, provide all the following headers in the\n request.
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see \n Protecting data using server-side encryption with customer-provided\n encryption keys (SSE-C).
\nYou also can use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual\n Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then\n added to the access control list (ACL) on the object. For more information, see\n Using ACLs. With this operation, you can grant access permissions\n using one of the following two methods:
\nSpecify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of\n predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. For more information, see\n Canned\n ACL.
Specify access permissions explicitly — To explicitly grant access\n permissions to specific Amazon Web Services accounts or groups, use the following headers.\n Each header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access Control List (ACL)\n Overview. In the header, you specify a list of grantees who get\n the specific permission. To grant permissions explicitly, use:
\n\n x-amz-grant-read
\n
\n x-amz-grant-write
\n
\n x-amz-grant-read-acp
\n
\n x-amz-grant-write-acp
\n
\n x-amz-grant-full-control
\n
You specify each grantee as a type=value pair, where the type is one of\n the following:
\n\n id
– if the value specified is the canonical user ID\n of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email\n address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
\n
The following operations are related to CreateMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThe server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
).
If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -21087,7 +22619,7 @@ "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Amazon Web Services KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -21219,7 +22751,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
).
Specifies the ID of the symmetric encryption customer managed key to use for object encryption.\n All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not made via SSL\n or using SigV4. For information about configuring using any of the officially supported\n Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies the ID of the symmetric encryption customer managed key to use for object encryption.\n All GET and PUT requests for an object protected by KMS will fail if they're not made via\n SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services\n SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -21276,7 +22808,7 @@ "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
\n causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with an object action doesn’t affect bucket-level settings for S3\n Bucket Key.
", + "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true
causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.
Specifying this header with an object action doesn’t affect bucket-level settings for S3\n Bucket Key.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -21388,7 +22920,7 @@ "Objects": { "target": "com.amazonaws.s3#ObjectIdentifierList", "traits": { - "smithy.api#documentation": "The objects to delete.
", + "smithy.api#documentation": "The object to delete.
", "smithy.api#required": {}, "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Object" @@ -21416,6 +22948,15 @@ }, "traits": { "smithy.api#documentation": "Deletes the S3 bucket. All objects (including all object versions and delete markers) in\n the bucket must be deleted before the bucket itself can be deleted.
\nThe following operations are related to DeleteBucket
:
\n CreateBucket\n
\n\n DeleteObject\n
\nDeletes the cors
configuration information set for the bucket.
To use this operation, you must have permission to perform the\n s3:PutBucketCORS
action. The bucket owner has this permission by default\n and can grant this permission to others.
For information about cors
, see Enabling Cross-Origin Resource Sharing in\n the Amazon S3 User Guide.
The following operations are related to DeleteBucketCors
:
\n PutBucketCors\n
\n\n RESTOPTIONSobject\n
\nDeletes the cors
configuration information set for the bucket.
To use this operation, you must have permission to perform the\n s3:PutBucketCORS
action. The bucket owner has this permission by default\n and can grant this permission to others.
For information about cors
, see Enabling Cross-Origin Resource Sharing in\n the Amazon S3 User Guide.
\n Related Resources\n
\n\n PutBucketCors\n
\n\n RESTOPTIONSobject\n
\nDeletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the\n lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your\n objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of\n rules contained in the deleted lifecycle configuration.
\nTo use this operation, you must have permission to perform the\n s3:PutLifecycleConfiguration
action. By default, the bucket owner has this\n permission and the bucket owner can grant this permission to others.
There is usually some time lag before lifecycle configuration deletion is fully\n propagated to all the Amazon S3 systems.
\nFor more information about the object expiration, see Elements to Describe Lifecycle Actions.
\nRelated actions include:
\nThis implementation of the DELETE action uses the policy subresource to delete the\n policy of a specified bucket. If you are using an identity other than the root user of the\n Amazon Web Services account that owns the bucket, the calling identity must have the\n DeleteBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy
, PutBucketPolicy
, and\n DeleteBucketPolicy
API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked from performing \n these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
For more information about bucket policies, see Using Bucket Policies and\n UserPolicies.
\nThe following operations are related to DeleteBucketPolicy
\n
\n CreateBucket\n
\n\n DeleteObject\n
\nDeletes the replication configuration from the bucket.
\nTo use this operation, you must have permissions to perform the\n s3:PutReplicationConfiguration
action. The bucket owner has these\n permissions by default and can grant it to others. For more information about permissions,\n see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
It can take a while for the deletion of a replication configuration to fully\n propagate.
\nFor information about replication configuration, see Replication in the\n Amazon S3 User Guide.
\nThe following operations are related to DeleteBucketReplication
:
\n PutBucketReplication\n
\n\n GetBucketReplication\n
\nDeletes the tags from the bucket.
\nTo use this operation, you must have permission to perform the\n s3:PutBucketTagging
action. By default, the bucket owner has this\n permission and can grant this permission to others.
The following operations are related to DeleteBucketTagging
:
\n GetBucketTagging\n
\n\n PutBucketTagging\n
\nThis action removes the website configuration for a bucket. Amazon S3 returns a 200\n OK
response upon successfully deleting a website configuration on the specified\n bucket. You will get a 200 OK
response if the website configuration you are\n trying to delete does not exist on the bucket. Amazon S3 returns a 404
response if\n the bucket specified in the request does not exist.
This DELETE action requires the S3:DeleteBucketWebsite
permission. By\n default, only the bucket owner can delete the website configuration attached to a bucket.\n However, bucket owners can grant other users permission to delete the website configuration\n by writing a bucket policy granting them the S3:DeleteBucketWebsite
\n permission.
For more information about hosting websites, see Hosting Websites on Amazon S3.
\nThe following operations are related to DeleteBucketWebsite
:
\n GetBucketWebsite\n
\n\n PutBucketWebsite\n
\nRemoves the null version (if there is one) of an object and inserts a delete marker,\n which becomes the latest version of the object. If there isn't a null version, Amazon S3 does\n not remove any objects but will still respond that the command was successful.
\nTo remove a specific version, you must use the version Id subresource. Using this\n subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3\n sets the response header, x-amz-delete-marker
, to true.
If the object you want to delete is in a bucket where the bucket versioning\n configuration is MFA Delete enabled, you must include the x-amz-mfa
request\n header in the DELETE versionId
request. Requests that include\n x-amz-mfa
must use HTTPS.
For more information about MFA Delete, see Using MFA Delete. To see sample\n requests that use versioning, see Sample\n Request.
\nYou can delete objects by explicitly calling DELETE Object or configure its lifecycle\n (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block\n users or accounts from removing or deleting objects from your bucket, you must deny them\n the s3:DeleteObject
, s3:DeleteObjectVersion
, and\n s3:PutLifeCycleConfiguration
actions.
The following action is related to DeleteObject
:
\n PutObject\n
\nRemoves the entire tag set from the specified object. For more information about\n managing object tags, see Object Tagging.
\nTo use this operation, you must have permission to perform the\n s3:DeleteObjectTagging
action.
To delete tags of a specific object version, add the versionId
query\n parameter in the request. You will need permission for the\n s3:DeleteObjectVersionTagging
action.
The following operations are related to DeleteObjectTagging
:
\n PutObjectTagging\n
\n\n GetObjectTagging\n
\nIf the encryption type is aws:kms
, this optional value specifies the ID of\n the symmetric encryption customer managed key to use for encryption of job results. Amazon S3 only\n supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service\n Developer Guide.
If the encryption type is aws:kms
, this optional value specifies the ID of\n the symmetric encryption customer managed key to use for encryption of job results. Amazon S3 only\n supports symmetric encryption KMS keys. For more information, see Asymmetric keys in KMS in the Amazon Web Services Key Management Service\n Developer Guide.
The accelerate configuration of the bucket.
" } + }, + "RequestCharged": { + "target": "com.amazonaws.s3#RequestCharged", + "traits": { + "smithy.api#httpHeader": "x-amz-request-charged" + } } }, "traits": { @@ -23051,6 +24677,12 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the\n bucket.
\n To use this operation, you must have permission to perform the\n s3:GetBucketCORS
action. By default, the bucket owner has this permission\n and can grant it to others.
To use this API operation against an access point, provide the alias of the access point in place of the bucket name.
\nTo use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. \nFor more information about InvalidAccessPointAliasError
, see List of\n Error Codes.
For more information about CORS, see Enabling Cross-Origin Resource\n Sharing.
\nThe following operations are related to GetBucketCors
:
\n PutBucketCors\n
\n\n DeleteBucketCors\n
\nReturns the default encryption configuration for an Amazon S3 bucket. By default, all buckets have a default encryption configuration that\n uses server-side encryption with Amazon S3 managed keys (SSE-S3). For information\n about the bucket default encryption feature, see Amazon S3 Bucket\n Default Encryption in the Amazon S3 User Guide.
\nTo use this operation, you must have permission to perform the\n s3:GetEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
The following operations are related to GetBucketEncryption
:
\n PutBucketEncryption\n
\nReturns the default encryption configuration for an Amazon S3 bucket. By default, all buckets\n have a default encryption configuration that uses server-side encryption with Amazon S3 managed\n keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket\n Default Encryption in the Amazon S3 User Guide.
\nTo use this operation, you must have permission to perform the\n s3:GetEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
The following operations are related to GetBucketEncryption
:
\n PutBucketEncryption\n
\nBucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The response describes the new filter element\n that you can use to specify a filter to select a subset of objects to which the rule\n applies. If you are using a previous version of the lifecycle configuration, it still\n works. For the earlier action, see GetBucketLifecycle.
\nReturns the lifecycle configuration information set on the bucket. For information about\n lifecycle configuration, see Object Lifecycle\n Management.
\nTo use this operation, you must have permission to perform the\n s3:GetLifecycleConfiguration
action. The bucket owner has this permission,\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
\n GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
\n
Description: The lifecycle configuration does not exist.
\nHTTP Status Code: 404 Not Found
\nSOAP Fault Code Prefix: Client
\nThe following operations are related to\n GetBucketLifecycleConfiguration
:
\n GetBucketLifecycle\n
\n\n PutBucketLifecycle\n
\nReturns the Region the bucket resides in. You set the bucket's Region using the\n LocationConstraint
request parameter in a CreateBucket
\n request. For more information, see CreateBucket.
To use this API operation against an access point, provide the alias of the access point in place of the bucket name.
\nTo use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. \nFor more information about InvalidAccessPointAliasError
, see List of\n Error Codes.
We recommend that you use HeadBucket to return the Region\n that a bucket resides in. For backward compatibility, Amazon S3 continues to support\n GetBucketLocation.
\nThe following operations are related to GetBucketLocation
:
\n GetObject\n
\n\n CreateBucket\n
\nReturns the policy of a specified bucket. If you are using an identity other than the\n root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n GetBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy
, PutBucketPolicy
, and\n DeleteBucketPolicy
API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked from performing \n these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
To use this API operation against an access point, provide the alias of the access point in place of the bucket name.
\nTo use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. \nFor more information about InvalidAccessPointAliasError
, see List of\n Error Codes.
For more information about bucket policies, see Using Bucket Policies and User\n Policies.
\nThe following action is related to GetBucketPolicy
:
\n GetObject\n
\nReturns the replication configuration of a bucket.
\nIt can take a while to propagate the put or delete of a replication configuration to\n all Amazon S3 systems. Therefore, a get request soon after a put or delete can return a wrong\n result.
\nFor information about replication configuration, see Replication in the\n Amazon S3 User Guide.
\nThis action requires permissions for the s3:GetReplicationConfiguration
\n action. For more information about permissions, see Using Bucket Policies and User\n Policies.
If you include the Filter
element in a replication configuration, you must\n also include the DeleteMarkerReplication
and Priority
elements.\n The response also returns those elements.
For information about GetBucketReplication
errors, see List of\n replication-related error codes\n
The following operations are related to GetBucketReplication
:
\n PutBucketReplication\n
\nReturns the request payment configuration of a bucket. To use this version of the\n operation, you must be the bucket owner. For more information, see Requester Pays\n Buckets.
\nThe following operations are related to GetBucketRequestPayment
:
\n ListObjects\n
\nReturns the tag set associated with the bucket.
\nTo use this operation, you must have permission to perform the\n s3:GetBucketTagging
action. By default, the bucket owner has this\n permission and can grant this permission to others.
\n GetBucketTagging
has the following special error:
Error code: NoSuchTagSet
\n
Description: There is no tag set associated with the bucket.
\nThe following operations are related to GetBucketTagging
:
\n PutBucketTagging\n
\n\n DeleteBucketTagging\n
\nReturns the versioning state of a bucket.
\nTo retrieve the versioning state of a bucket, you must be the bucket owner.
\nThis implementation also returns the MFA Delete status of the versioning state. If the\n MFA Delete status is enabled
, the bucket owner must use an authentication\n device to change the versioning state of the bucket.
The following operations are related to GetBucketVersioning
:
\n GetObject\n
\n\n PutObject\n
\n\n DeleteObject\n
\nReturns the website configuration for a bucket. To host website on Amazon S3, you can\n configure a bucket as website by adding a website configuration. For more information about\n hosting websites, see Hosting Websites on Amazon S3.
\nThis GET action requires the S3:GetBucketWebsite
permission. By default,\n only the bucket owner can read the bucket website configuration. However, bucket owners can\n allow other users to read the website configuration by writing a bucket policy granting\n them the S3:GetBucketWebsite
permission.
The following operations are related to GetBucketWebsite
:
\n DeleteBucketWebsite\n
\n\n PutBucketWebsite\n
\nRetrieves objects from Amazon S3. To use GET
, you must have READ
\n access to the object. If you grant READ
access to the anonymous user, you can\n return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg
,\n you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET
operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the resource as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host\n Header Bucket Specification.
For more information about returning the ACL of an object, see GetObjectAcl.
\nIf the object you are retrieving is stored in the S3 Glacier or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectState
error. For information about restoring archived objects,\n see Restoring\n Archived Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not\n be sent for GET requests if your object uses server-side encryption with KMS keys\n (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your\n object does use these types of keys, you’ll get an HTTP 400 Bad Request error.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).
\nAssuming you have the relevant permission to read object tags, the response also returns\n the x-amz-tagging-count
 header that provides the count of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.
You need the relevant read object (or version) permission for this operation. For more\n information, see Specifying Permissions in a\n Policy. If the object you request does not exist, the error Amazon S3 returns depends\n on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 will\n return an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 will return an\n HTTP status code 403 (\"access denied\") error.
By default, the GET action returns the current version of an object. To return a\n different version, use the versionId
subresource.
If you supply a versionId
, you need the\n s3:GetObjectVersion
permission to access a specific version of an\n object. If you request a specific version, you do not need to have the\n s3:GetObject
permission. If you request the current version\n without a specific version ID, only s3:GetObject
permission is\n required. s3:GetObjectVersion
permission won't be required.
If the current version of the object is a delete marker, Amazon S3 behaves as if the\n object was deleted and includes x-amz-delete-marker: true
in the\n response.
For more information about versioning, see PutBucketVersioning.
\nThere are times when you want to override certain response header values in a GET\n response. For example, you might override the Content-Disposition
response\n header value in your GET request.
You can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request, that is,\n when status code 200 OK is returned. The set of headers you can override using these\n parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n response headers that you can override for the GET response are Content-Type
,\n Content-Language
, Expires
, Cache-Control
,\n Content-Disposition
, and Content-Encoding
. To override these\n header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL,\n when using these parameters. They cannot be used with an unsigned (anonymous)\n request.
\n\n response-content-type
\n
\n response-content-language
\n
\n response-expires
\n
\n response-cache-control
\n
\n response-content-disposition
\n
\n response-content-encoding
\n
If both of the If-Match
and If-Unmodified-Since
headers are\n present in the request as follows: If-Match
condition evaluates to\n true
, and; If-Unmodified-Since
condition evaluates to\n false
; then, S3 returns 200 OK and the data requested.
If both of the If-None-Match
and If-Modified-Since
headers are\n present in the request as follows: If-None-Match
condition evaluates to\n false
, and; If-Modified-Since
condition evaluates to\n true
; then, S3 returns 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
\nThe following operations are related to GetObject
:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nRetrieves objects from Amazon S3. To use GET
, you must have READ
\n access to the object. If you grant READ
access to the anonymous user, you can\n return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg
,\n you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET
operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the resource as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host\n Header Bucket Specification.
For more information about returning the ACL of an object, see GetObjectAcl.
\nIf the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectState
error. For information about restoring archived objects,\n see Restoring\n Archived Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not\n be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS)\n keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use\n these types of keys, you’ll get an HTTP 400 Bad Request error.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).
\nAssuming you have the relevant permission to read object tags, the response also returns\n the x-amz-tagging-count
 header that provides the count of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.
You need the relevant read object (or version) permission for this operation. For more\n information, see Specifying Permissions in a\n Policy. If the object that you request doesn’t exist, the error that Amazon S3 returns depends\n on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 (Not Found) error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an\n HTTP status code 403 (\"access denied\") error.
By default, the GET
action returns the current version of an object. To return a\n different version, use the versionId
subresource.
If you supply a versionId
, you need the\n s3:GetObjectVersion
permission to access a specific version of an\n object. If you request a specific version, you do not need to have the\n s3:GetObject
permission. If you request the current version\n without a specific version ID, only s3:GetObject
permission is\n required. s3:GetObjectVersion
permission won't be required.
If the current version of the object is a delete marker, Amazon S3 behaves as if the\n object was deleted and includes x-amz-delete-marker: true
in the\n response.
For more information about versioning, see PutBucketVersioning.
\nThere are times when you want to override certain response header values in a GET
\n response. For example, you might override the Content-Disposition
response\n header value in your GET
request.
You can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request, that is,\n when status code 200 OK is returned. The set of headers you can override using these\n parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n response headers that you can override for the GET
response are Content-Type
,\n Content-Language
, Expires
, Cache-Control
,\n Content-Disposition
, and Content-Encoding
. To override these\n header values in the GET
response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL,\n when using these parameters. They cannot be used with an unsigned (anonymous)\n request.
\n\n response-content-type
\n
\n response-content-language
\n
\n response-expires
\n
\n response-cache-control
\n
\n response-content-disposition
\n
\n response-content-encoding
\n
If both of the If-Match
and If-Unmodified-Since
headers are\n present in the request as follows: If-Match
condition evaluates to\n true
, and; If-Unmodified-Since
condition evaluates to\n false
; then, S3 returns 200 OK and the data requested.
If both of the If-None-Match
and If-Modified-Since
headers are\n present in the request as follows: If-None-Match
condition evaluates to\n false
, and; If-Modified-Since
condition evaluates to\n true
; then, S3 returns 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
\nThe following operations are related to GetObject
:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nReturns the access control list (ACL) of an object. To use this operation, you must have\n s3:GetObjectAcl
permissions or READ_ACP
access to the object.\n For more information, see Mapping of ACL permissions and access policy permissions in the Amazon S3\n User Guide\n
This action is not supported by Amazon S3 on Outposts.
\nBy default, GET returns ACL information about the current version of an object. To\n return ACL information about a different version, use the versionId subresource.
\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership,\n requests to read ACLs are still supported and return the\n bucket-owner-full-control
ACL with the owner being the account that\n created the bucket. For more information, see Controlling object\n ownership and disabling ACLs in the\n Amazon S3 User Guide.
The following operations are related to GetObjectAcl
:
\n GetObject\n
\n\n GetObjectAttributes\n
\n\n DeleteObject\n
\n\n PutObject\n
\nThe server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -24900,7 +26742,7 @@ "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with Amazon Web Services\n KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with\n Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -25221,6 +27063,29 @@ }, "traits": { "smithy.api#documentation": "Returns the tag-set of an object. You send the GET request against the tagging\n subresource associated with the object.
\nTo use this operation, you must have permission to perform the\n s3:GetObjectTagging
action. By default, the GET action returns information\n about current version of an object. For a versioned bucket, you can have multiple versions\n of an object in your bucket. To retrieve tags of any other version, use the versionId query\n parameter. You also need permission for the s3:GetObjectVersionTagging
\n action.
By default, the bucket owner has this permission and can grant this permission to\n others.
\nFor information about the Amazon S3 object tagging feature, see Object Tagging.
\nThe following actions are related to GetObjectTagging
:
\n DeleteObjectTagging\n
\n\n GetObjectAttributes\n
\n\n PutObjectTagging\n
\nReturns torrent files from a bucket. BitTorrent can save you bandwidth when you're\n distributing large files.
\nYou can get torrent only for objects that are less than 5 GB in size, and that are\n not encrypted using server-side encryption with a customer-provided encryption\n key.
\nTo use GET, you must have READ access to the object.
\nThis action is not supported by Amazon S3 on Outposts.
\nThe following action is related to GetObjectTorrent
:
\n GetObject\n
\nThis action is useful to determine if a bucket exists and you have permission to access\n it. The action returns a 200 OK
if the bucket exists and you have permission\n to access it.
If the bucket does not exist or you do not have permission to access it, the\n HEAD
request returns a generic 400 Bad Request
, 403\n Forbidden
or 404 Not Found
code. A message body is not included, so\n you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the\n s3:ListBucket
action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
To use this API operation against an access point, you must provide the alias of the access point in place of the\n bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to\n the access point hostname. The access point hostname takes the form\n AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.\n When using the Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For more\n information, see Using access points.
\nTo use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. \nFor more information about InvalidAccessPointAliasError
, see List of\n Error Codes.
The HEAD action retrieves metadata from an object without returning the object itself.\n This action is useful if you're only interested in an object's metadata. To use HEAD, you\n must have READ access to the object.
\nA HEAD
request has the same options as a GET
action on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not\n Found
code. It is not possible to retrieve the exact exception beyond these error\n codes.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).
\nEncryption request headers, like x-amz-server-side-encryption
,\n should not be sent for GET requests if your object uses server-side encryption\n with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption\n keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400\n Bad Request error.
The last modified property in this case is the creation date of the\n object.
\nRequest headers are limited to 8 KB in size. For more information, see Common\n Request Headers.
\nConsider the following when using request headers:
\n Consideration 1 – If both of the If-Match
and\n If-Unmodified-Since
headers are present in the request as\n follows:
\n If-Match
condition evaluates to true
, and;
\n If-Unmodified-Since
condition evaluates to\n false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and\n If-Modified-Since
headers are present in the request as\n follows:
\n If-None-Match
condition evaluates to false
,\n and;
\n If-Modified-Since
condition evaluates to\n true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
\nYou need the relevant read object (or version) permission for this operation. For more\n information, see Actions, resources, and condition keys for Amazon S3. \n If the object you request does not exist, the error Amazon S3 returns depends\n on whether you also have the s3:ListBucket permission.
\nIf you have the s3:ListBucket
permission on the bucket, Amazon S3 returns\n an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP\n status code 403 (\"access denied\") error.
The following actions are related to HeadObject
:
\n GetObject\n
\n\n GetObjectAttributes\n
\nThe HEAD
action retrieves metadata from an object without returning the object itself.\n This action is useful if you're only interested in an object's metadata. To use HEAD
, you\n must have READ access to the object.
A HEAD
request has the same options as a GET
action on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not\n Found
code. It is not possible to retrieve the exact exception beyond these error\n codes.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).
\nEncryption request headers, like x-amz-server-side-encryption
,\n should not be sent for GET
requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). If your object does use these types of keys,\n you’ll get an HTTP 400 Bad Request error.
The last modified property in this case is the creation date of the\n object.
\nRequest headers are limited to 8 KB in size. For more information, see Common\n Request Headers.
\nConsider the following when using request headers:
\n Consideration 1 – If both of the If-Match
and\n If-Unmodified-Since
headers are present in the request as\n follows:
\n If-Match
condition evaluates to true
, and;
\n If-Unmodified-Since
condition evaluates to\n false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and\n If-Modified-Since
headers are present in the request as\n follows:
\n If-None-Match
condition evaluates to false
,\n and;
\n If-Modified-Since
condition evaluates to\n true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
\nYou need the relevant read object (or version) permission for this operation. For more\n information, see Actions, resources, and condition keys for Amazon S3. \n If the object you request doesn't exist, the error that Amazon S3 returns depends\n on whether you also have the s3:ListBucket permission.
\nIf you have the s3:ListBucket
permission on the bucket, Amazon S3 returns\n an HTTP status code 404 error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP\n status code 403 error.
The following actions are related to HeadObject
:
\n GetObject\n
\n\n GetObjectAttributes\n
\nThe server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -25861,7 +27746,7 @@ "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with Amazon Web Services\n KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with\n Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -27238,6 +29123,32 @@ }, "traits": { "smithy.api#documentation": "Returns a list of all buckets owned by the authenticated sender of the request. To use\n this operation, you must have the s3:ListAllMyBuckets
permission.
For information about Amazon S3 buckets, see Creating, configuring, and\n working with Amazon S3 buckets.
", + "smithy.api#examples": [ + { + "title": "To list all buckets", + "documentation": "The following example returns all the buckets owned by the sender of this request.", + "output": { + "Owner": { + "DisplayName": "own-display-name", + "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31" + }, + "Buckets": [ + { + "CreationDate": "2012-02-15T21:03:02.000Z", + "Name": "examplebucket" + }, + { + "CreationDate": "2011-07-24T19:33:50.000Z", + "Name": "examplebucket2" + }, + { + "CreationDate": "2010-12-17T00:56:49.000Z", + "Name": "examplebucket3" + } + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/", @@ -27362,6 +29273,12 @@ "traits": { "smithy.api#documentation": "Encoding type used by Amazon S3 to encode object keys in the response.
\nIf you specify encoding-type
request parameter, Amazon S3 includes this element\n in the response, and returns encoded key name values in the following response\n elements:
\n Delimiter
, KeyMarker
, Prefix
,\n NextKeyMarker
, Key
.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
Returns metadata about all versions of the objects in a bucket. You can also use request\n parameters as selection criteria to return metadata about a subset of all the object\n versions.
\n To use this operation, you must have permissions to perform the\n s3:ListBucketVersions
action. Be aware of the name difference.
A 200 OK response can contain valid or invalid XML. Make sure to design your\n application to parse the contents of the response and handle it appropriately.
\nTo use this operation, you must have READ access to the bucket.
\nThis action is not supported by Amazon S3 on Outposts.
\nThe following operations are related to ListObjectVersions
:
\n ListObjectsV2\n
\n\n GetObject\n
\n\n PutObject\n
\n\n DeleteObject\n
\nEncoding type used by Amazon S3 to encode object key names in the XML response.
\nIf you specify encoding-type request parameter, Amazon S3 includes this element in the\n response, and returns encoded key name values in the following response elements:
\n\n KeyMarker, NextKeyMarker, Prefix, Key
, and Delimiter
.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
Encoding type used by Amazon S3 to encode object keys in the response.
" } + }, + "RequestCharged": { + "target": "com.amazonaws.s3#RequestCharged", + "traits": { + "smithy.api#httpHeader": "x-amz-request-charged" + } } }, "traits": { @@ -27885,6 +29866,12 @@ "traits": { "smithy.api#documentation": "If StartAfter was sent with the request, it is included in the response.
" } + }, + "RequestCharged": { + "target": "com.amazonaws.s3#RequestCharged", + "traits": { + "smithy.api#httpHeader": "x-amz-request-charged" + } } }, "traits": { @@ -29736,7 +31723,18 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "Sets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set the ACL of a\n bucket, you must have WRITE_ACP
permission.
You can use one of the following two ways to set a bucket's permissions:
\nSpecify the ACL in the request body
\nSpecify permissions using request headers
\nYou cannot specify access permission using both the body and the request\n headers.
\nDepending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.
\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported
error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.
You can set access permissions using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. Specify the canned ACL name as the\n value of x-amz-acl
. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned\n ACL.
Specify access permissions explicitly with the x-amz-grant-read
,\n x-amz-grant-read-acp
, x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers, you\n specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n the x-amz-acl
header to set a canned ACL. These parameters map to the\n set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control\n List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an\n Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-write
header grants create,\n overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and\n two Amazon Web Services accounts identified by their email addresses.
\n x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\",\n id=\"111122223333\", id=\"555566667777\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nThe following operations are related to PutBucketAcl
:
\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetObjectAcl\n
\nSets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set the ACL of a\n bucket, you must have WRITE_ACP
permission.
You can use one of the following two ways to set a bucket's permissions:
\nSpecify the ACL in the request body
\nSpecify permissions using request headers
\nYou cannot specify access permission using both the body and the request\n headers.
\nDepending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.
\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported
error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.
You can set access permissions by using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. Specify the canned ACL name as the\n value of x-amz-acl
. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned\n ACL.
Specify access permissions explicitly with the x-amz-grant-read
,\n x-amz-grant-read-acp
, x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers, you\n specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n the x-amz-acl
header to set a canned ACL. These parameters map to the\n set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control\n List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an\n Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-write
header grants create,\n overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and\n two Amazon Web Services accounts identified by their email addresses.
\n x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\",\n id=\"111122223333\", id=\"555566667777\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nThe following operations are related to PutBucketAcl
:
\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetObjectAcl\n
\nSets the cors
configuration for your bucket. If the configuration exists,\n Amazon S3 replaces it.
To use this operation, you must be allowed to perform the s3:PutBucketCORS
\n action. By default, the bucket owner has this permission and can grant it to others.
You set this configuration on a bucket so that the bucket can service cross-origin\n requests. For example, you might want to enable a request whose origin is\n http://www.example.com
to access your Amazon S3 bucket at\n my.example.bucket.com
by using the browser's XMLHttpRequest
\n capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the\n cors
subresource to the bucket. The cors
subresource is an XML\n document in which you configure rules that identify origins and the HTTP methods that can\n be executed on your bucket. The document is limited to 64 KB in size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a\n bucket, it evaluates the cors
configuration on the bucket and uses the first\n CORSRule
rule that matches the incoming browser request to enable a\n cross-origin request. For a rule to match, the following conditions must be met:
The request's Origin
header must match AllowedOrigin
\n elements.
The request method (for example, GET, PUT, HEAD, and so on) or the\n Access-Control-Request-Method
header in case of a pre-flight\n OPTIONS
request must be one of the AllowedMethod
\n elements.
Every header specified in the Access-Control-Request-Headers
request\n header of a pre-flight request must match an AllowedHeader
element.\n
For more information about CORS, go to Enabling Cross-Origin Resource Sharing in\n the Amazon S3 User Guide.
\nThe following operations are related to PutBucketCors
:
\n GetBucketCors\n
\n\n DeleteBucketCors\n
\n\n RESTOPTIONSobject\n
\nThis action uses the encryption
subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that\n uses server-side encryption with Amazon S3 managed keys (SSE-S3).\n You can optionally configure default encryption for a bucket by using server-side\n encryption with an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about bucket default encryption,\n see Amazon S3\n bucket default encryption in the Amazon S3 User Guide. For more\n information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.
\nThis action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).
\nTo use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to PutBucketEncryption
:
\n GetBucketEncryption\n
\nThis action uses the encryption
subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS),\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side\n encryption with customer-provided keys (SSE-C). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about bucket default\n encryption, see Amazon S3 bucket default encryption\n in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see\n Amazon S3 Bucket\n Keys in the Amazon S3 User Guide.
\nThis action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).
\nTo use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to PutBucketEncryption
:
\n GetBucketEncryption\n
\nSpecifies default encryption for a bucket using server-side encryption with different\n key options. By default, all buckets have a default encryption configuration that\n uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side\n encryption with an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). For information about the bucket default\n encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies default encryption for a bucket using server-side encryption with different\n key options. By default, all buckets have a default encryption configuration that uses\n server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure\n default encryption for a bucket by using server-side encryption with an Amazon Web Services KMS key\n (SSE-KMS) or a customer-provided key (SSE-C). For information about the bucket default\n encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -30159,7 +32200,36 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. Keep in mind that this will overwrite an existing lifecycle configuration,\n so if you want to retain any configuration details, they must be included in the new\n lifecycle configuration. For information about lifecycle configuration, see Managing\n your storage lifecycle.
\nBucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The previous version of the API supported\n filtering based only on an object key name prefix, which is supported for backward\n compatibility. For the related API description, see PutBucketLifecycle.
\nYou specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle\n configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists\n of the following:
\nFilter identifying a subset of objects to which the rule applies. The filter can\n be based on a key name prefix, object tags, or a combination of both.
\nStatus whether the rule is in effect.
\nOne or more lifecycle transition and expiration actions that you want Amazon S3 to\n perform on the objects identified by the filter. If the state of your bucket is\n versioning-enabled or versioning-suspended, you can have many versions of the same\n object (one current version and zero or more noncurrent versions). Amazon S3 provides\n predefined actions that you can specify for current and noncurrent object\n versions.
\nFor more information, see Object Lifecycle Management\n and Lifecycle Configuration Elements.
\nBy default, all Amazon S3 resources are private, including buckets, objects, and related\n subresources (for example, lifecycle configuration and website configuration). Only the\n resource owner (that is, the Amazon Web Services account that created it) can access the resource. The\n resource owner can optionally grant access permissions to others by writing an access\n policy. For this operation, a user must get the s3:PutLifecycleConfiguration
\n permission.
You can also explicitly deny permissions. Explicit deny also supersedes any other\n permissions. If you want to block users or accounts from removing or deleting objects from\n your bucket, you must deny them permissions for the following actions:
\n\n s3:DeleteObject
\n
\n s3:DeleteObjectVersion
\n
\n s3:PutLifecycleConfiguration
\n
For more information about permissions, see Managing Access Permissions to\n Your Amazon S3 Resources.
\nThe following operations are related to PutBucketLifecycleConfiguration
:
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. Keep in mind that this will overwrite an existing lifecycle configuration,\n so if you want to retain any configuration details, they must be included in the new\n lifecycle configuration. For information about lifecycle configuration, see Managing\n your storage lifecycle.
\nBucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The previous version of the API supported\n filtering based only on an object key name prefix, which is supported for backward\n compatibility. For the related API description, see PutBucketLifecycle.
\nYou specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle\n configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists\n of the following:
\nA filter identifying a subset of objects to which the rule applies. The filter can\n be based on a key name prefix, object tags, or a combination of both.
\nA status indicating whether the rule is in effect.
\nOne or more lifecycle transition and expiration actions that you want Amazon S3 to\n perform on the objects identified by the filter. If the state of your bucket is\n versioning-enabled or versioning-suspended, you can have many versions of the same\n object (one current version and zero or more noncurrent versions). Amazon S3 provides\n predefined actions that you can specify for current and noncurrent object\n versions.
\nFor more information, see Object Lifecycle Management\n and Lifecycle Configuration Elements.
\nBy default, all Amazon S3 resources are private, including buckets, objects, and related\n subresources (for example, lifecycle configuration and website configuration). Only the\n resource owner (that is, the Amazon Web Services account that created it) can access the resource. The\n resource owner can optionally grant access permissions to others by writing an access\n policy. For this operation, a user must get the s3:PutLifecycleConfiguration
\n permission.
You can also explicitly deny permissions. An explicit deny also supersedes any other\n permissions. If you want to block users or accounts from removing or deleting objects from\n your bucket, you must deny them permissions for the following actions:
\n\n s3:DeleteObject
\n
\n s3:DeleteObjectVersion
\n
\n s3:PutLifecycleConfiguration
\n
For more information about permissions, see Managing Access Permissions to\n Your Amazon S3 Resources.
\nThe following operations are related to PutBucketLifecycleConfiguration
:
Set the logging parameters for a bucket and to specify permissions for who can view and\n modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as\n the source bucket. To set the logging status of a bucket, you must be the bucket\n owner.
\nThe bucket owner is automatically granted FULL_CONTROL to all logs. You use the\n Grantee
request element to grant access to other people. The\n Permissions
request element specifies the kind of access the grantee has to\n the logs.
If the target bucket for log delivery uses the bucket owner enforced setting for S3\n Object Ownership, you can't use the Grantee
request element to grant access\n to others. Permissions can only be granted using policies. For more information, see\n Permissions for server access log delivery in the\n Amazon S3 User Guide.
You can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request.
\nBy Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nBy URI:
\n\n
\n
To enable logging, you use LoggingEnabled and its children request elements. To disable\n logging, you use an empty BucketLoggingStatus request element:
\n\n
\n
For more information about server access logging, see Server Access Logging in the\n Amazon S3 User Guide.
\nFor more information about creating a bucket, see CreateBucket. For more\n information about returning the logging status of a bucket, see GetBucketLogging.
\nThe following operations are related to PutBucketLogging
:
\n PutObject\n
\n\n DeleteBucket\n
\n\n CreateBucket\n
\n\n GetBucketLogging\n
\nSet the logging parameters for a bucket and to specify permissions for who can view and\n modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as\n the source bucket. To set the logging status of a bucket, you must be the bucket\n owner.
\nThe bucket owner is automatically granted FULL_CONTROL to all logs. You use the\n Grantee
request element to grant access to other people. The\n Permissions
request element specifies the kind of access the grantee has to\n the logs.
If the target bucket for log delivery uses the bucket owner enforced setting for S3\n Object Ownership, you can't use the Grantee
request element to grant access\n to others. Permissions can only be granted using policies. For more information, see\n Permissions for server access log delivery in the\n Amazon S3 User Guide.
You can specify the person (grantee) to whom you're assigning access rights (by using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
\n DisplayName
is optional and ignored in the request.
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser
and, in a response to a GETObjectAcl
\n request, appears as the CanonicalUser.
By URI:
\n\n
\n
To enable logging, you use LoggingEnabled
and its children request elements. To disable\n logging, you use an empty BucketLoggingStatus
request element:
\n
\n
For more information about server access logging, see Server Access Logging in the\n Amazon S3 User Guide.
\nFor more information about creating a bucket, see CreateBucket. For more\n information about returning the logging status of a bucket, see GetBucketLogging.
\nThe following operations are related to PutBucketLogging
:
\n PutObject\n
\n\n DeleteBucket\n
\n\n CreateBucket\n
\n\n GetBucketLogging\n
\nSets a metrics configuration (specified by the metrics configuration ID) for the bucket.\n You can have up to 1,000 metrics configurations per bucket. If you're updating an existing\n metrics configuration, note that this is a full replacement of the existing metrics\n configuration. If you don't include the elements you want to keep, they are erased.
\nTo use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration
action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring\n Metrics with Amazon CloudWatch.
\nThe following operations are related to\n PutBucketMetricsConfiguration
:
\n GetBucketLifecycle
has the following special error:
Error code: TooManyConfigurations
\n
Description: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.
\nHTTP Status Code: HTTP 400 Bad Request
\nSets a metrics configuration (specified by the metrics configuration ID) for the bucket.\n You can have up to 1,000 metrics configurations per bucket. If you're updating an existing\n metrics configuration, note that this is a full replacement of the existing metrics\n configuration. If you don't include the elements you want to keep, they are erased.
\nTo use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration
action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring\n Metrics with Amazon CloudWatch.
\nThe following operations are related to\n PutBucketMetricsConfiguration
:
\n PutBucketMetricsConfiguration
has the following special error:
Error code: TooManyConfigurations
\n
Description: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.
\nHTTP Status Code: HTTP 400 Bad Request
\nEnables notifications of specified events for a bucket. For more information about event\n notifications, see Configuring Event\n Notifications.
\nUsing this API, you can replace an existing notification configuration. The\n configuration is an XML file that defines the event types that you want Amazon S3 to publish and\n the destination where you want Amazon S3 to publish an event notification when it detects an\n event of the specified type.
\nBy default, your bucket has no event notifications configured. That is, the notification\n configuration will be an empty NotificationConfiguration
.
\n
\n
\n \n
This action replaces the existing notification configuration with the configuration you\n include in the request body.
\nAfter Amazon S3 receives this request, it first verifies that any Amazon Simple Notification\n Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and\n that the bucket owner has permission to publish to it by sending a test notification. In\n the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions\n grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information,\n see Configuring Notifications for Amazon S3 Events.
\nYou can disable notifications by adding the empty NotificationConfiguration\n element.
\nFor more information about the number of event notification configurations that you can\n create per bucket, see Amazon S3 service quotas in Amazon Web Services\n General Reference.
\nBy default, only the bucket owner can configure notifications on a bucket. However,\n bucket owners can use a bucket policy to grant permission to other users to set this\n configuration with s3:PutBucketNotification
permission.
The PUT notification is an atomic operation. For example, suppose your notification\n configuration includes SNS topic, SQS queue, and Lambda function configurations. When\n you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS\n topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the\n configuration to your bucket.
\nIf the configuration in the request body includes only one\n TopicConfiguration
specifying only the\n s3:ReducedRedundancyLostObject
event type, the response will also include\n the x-amz-sns-test-message-id
header containing the message ID of the test\n notification sent to the topic.
The following action is related to\n PutBucketNotificationConfiguration
:
Enables notifications of specified events for a bucket. For more information about event\n notifications, see Configuring Event\n Notifications.
\nUsing this API, you can replace an existing notification configuration. The\n configuration is an XML file that defines the event types that you want Amazon S3 to publish and\n the destination where you want Amazon S3 to publish an event notification when it detects an\n event of the specified type.
\nBy default, your bucket has no event notifications configured. That is, the notification\n configuration will be an empty NotificationConfiguration
.
\n
\n
\n \n
This action replaces the existing notification configuration with the configuration you\n include in the request body.
\nAfter Amazon S3 receives this request, it first verifies that any Amazon Simple Notification\n Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and\n that the bucket owner has permission to publish to it by sending a test notification. In\n the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions\n grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information,\n see Configuring Notifications for Amazon S3 Events.
\nYou can disable notifications by adding the empty NotificationConfiguration\n element.
\nFor more information about the number of event notification configurations that you can\n create per bucket, see Amazon S3 service quotas in Amazon Web Services\n General Reference.
\nBy default, only the bucket owner can configure notifications on a bucket. However,\n bucket owners can use a bucket policy to grant permission to other users to set this\n configuration with the required s3:PutBucketNotification
permission.
The PUT notification is an atomic operation. For example, suppose your notification\n configuration includes SNS topic, SQS queue, and Lambda function configurations. When\n you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS\n topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the\n configuration to your bucket.
\nIf the configuration in the request body includes only one\n TopicConfiguration
specifying only the\n s3:ReducedRedundancyLostObject
event type, the response will also include\n the x-amz-sns-test-message-id
header containing the message ID of the test\n notification sent to the topic.
The following action is related to\n PutBucketNotificationConfiguration
:
Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than\n the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n PutBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy
, PutBucketPolicy
, and\n DeleteBucketPolicy
API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked from performing \n these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
For more information, see Bucket policy\n examples.
\nThe following operations are related to PutBucketPolicy
:
\n CreateBucket\n
\n\n DeleteBucket\n
\nCreates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.
\nSpecify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information.
\nA replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.
\nTo specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication
, Status
, and\n Priority
.
If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.
\nFor information about enabling versioning on a bucket, see Using Versioning.
\nBy default, Amazon S3 doesn't replicate objects that are stored at rest using server-side\n encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects, add the following:\n SourceSelectionCriteria
, SseKmsEncryptedObjects
,\n Status
, EncryptionConfiguration
, and\n ReplicaKmsKeyID
. For information about replication configuration, see\n Replicating Objects\n Created with SSE Using KMS keys.
For information on PutBucketReplication
errors, see List of\n replication-related error codes\n
To create a PutBucketReplication
request, you must have\n s3:PutReplicationConfiguration
permissions for the bucket.\n \n
By default, a resource owner, in this case the Amazon Web Services account that created the bucket,\n can perform this operation. The resource owner can also grant others permissions to perform\n the operation. For more information about permissions, see Specifying Permissions in a\n Policy and Managing Access Permissions to\n Your Amazon S3 Resources.
\nTo perform this operation, the user or role performing the action must have the\n iam:PassRole permission.
\nThe following operations are related to PutBucketReplication
:
\n GetBucketReplication\n
\nSets the request payment configuration for a bucket. By default, the bucket owner pays\n for downloads from the bucket. This configuration parameter enables the bucket owner (only)\n to specify that the person requesting the download will be charged for the download. For\n more information, see Requester Pays\n Buckets.
\nThe following operations are related to PutBucketRequestPayment
:
\n CreateBucket\n
\nSets the tags for a bucket.
\nUse tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this,\n sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost\n of combined resources, organize your billing information according to resources with the\n same tag key values. For example, you can tag several resources with a specific application\n name, and then organize your billing information to see the total cost of that application\n across several services. For more information, see Cost Allocation and\n Tagging and Using Cost Allocation in Amazon S3 Bucket\n Tags.
\nWhen this operation sets the tags for a bucket, it will overwrite any current tags\n the bucket already has. You cannot use this operation to add tags to an existing list of\n tags.
\nTo use this operation, you must have permissions to perform the\n s3:PutBucketTagging
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
\n PutBucketTagging
has the following special errors:
Error code: InvalidTagError
\n
Description: The tag provided was not a valid tag. This error can occur if\n the tag did not pass input validation. For information about tag restrictions,\n see User-Defined Tag Restrictions and Amazon Web Services-Generated Cost Allocation Tag Restrictions.
\nError code: MalformedXMLError
\n
Description: The XML provided does not match the schema.
\nError code: OperationAbortedError
\n
Description: A conflicting conditional action is currently in progress\n against this resource. Please try again.
\nError code: InternalError
\n
Description: The service was unable to apply the provided tag to the\n bucket.
\nThe following operations are related to PutBucketTagging
:
\n GetBucketTagging\n
\n\n DeleteBucketTagging\n
\nSets the versioning state of an existing bucket.
\nYou can set the versioning state with one of the following values:
\n\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.
\n\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.
\nIf the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.
\nIn order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request
header and the Status
and the\n MfaDelete
request elements in a request to set the versioning state of the\n bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket and\n you want to maintain the same permanent delete behavior when you enable versioning, you\n must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will\n manage the deletes of the noncurrent object versions in the version-enabled bucket. (A\n version-enabled bucket maintains one current and zero or more noncurrent object\n versions.) For more information, see Lifecycle and Versioning.
\nThe following operations are related to PutBucketVersioning
:
\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetBucketVersioning\n
\nSets the configuration of the website that is specified in the website
\n subresource. To configure a bucket as a website, you can add this subresource on the bucket\n with website configuration information such as the file name of the index document and any\n redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT action requires the S3:PutBucketWebsite
permission. By default,\n only the bucket owner can configure the website attached to a bucket; however, bucket\n owners can allow other users to set the website configuration by writing a bucket policy\n that grants them the S3:PutBucketWebsite
permission.
To redirect all website requests sent to the bucket's website endpoint, you add a\n website configuration with the following elements. Because all requests are sent to another\n website, you don't need to provide index document name for the bucket.
\n\n WebsiteConfiguration
\n
\n RedirectAllRequestsTo
\n
\n HostName
\n
\n Protocol
\n
If you want granular control over redirects, you can use the following elements to add\n routing rules that describe conditions for redirecting requests and information about the\n redirect destination. In this case, the website configuration must provide an index\n document for the bucket, because some requests might not be redirected.
\n\n WebsiteConfiguration
\n
\n IndexDocument
\n
\n Suffix
\n
\n ErrorDocument
\n
\n Key
\n
\n RoutingRules
\n
\n RoutingRule
\n
\n Condition
\n
\n HttpErrorCodeReturnedEquals
\n
\n KeyPrefixEquals
\n
\n Redirect
\n
\n Protocol
\n
\n HostName
\n
\n ReplaceKeyPrefixWith
\n
\n ReplaceKeyWith
\n
\n HttpRedirectCode
\n
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more\n than 50 routing rules, you can use object redirect. For more information, see Configuring an\n Object Redirect in the Amazon S3 User Guide.
", + "smithy.api#examples": [ + { + "title": "Set website configuration on a bucket", + "documentation": "The following example adds website configuration to a bucket.", + "input": { + "Bucket": "examplebucket", + "ContentMD5": "", + "WebsiteConfiguration": { + "IndexDocument": { + "Suffix": "index.html" + }, + "ErrorDocument": { + "Key": "error.html" + } + } + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?website", @@ -30911,7 +33120,23 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object\n to it.
\nAmazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject
to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. To prevent objects from\n being deleted or overwritten, you can use Amazon S3 Object\n Lock.
\nTo ensure that data is not corrupted traversing the network, use the\n Content-MD5
header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, returns an error. Additionally,\n you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.
To successfully complete the PutObject
request, you must have the\n s3:PutObject
in your IAM permissions.
To successfully change the objects acl of your PutObject
request,\n you must have the s3:PutObjectAcl
in your IAM permissions.
To successfully set the tag-set with your PutObject
request, you\n must have the s3:PutObjectTagging
in your IAM permissions.
The Content-MD5
header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information about Amazon S3 Object Lock, see Amazon S3 Object Lock\n Overview in the Amazon S3 User Guide.
You have three mutually exclusive options to protect data using server-side encryption\n in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the\n encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and\n customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using\n Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at by\n rest using server-side encryption with other key options. For more information, see Using\n Server-Side Encryption.
\nWhen adding a new object, you can use headers to grant ACL-based permissions to\n individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are\n then added to the ACL on the object. By default, all objects are private. Only the owner\n has full access control. For more information, see Access Control List (ACL) Overview\n and Managing\n ACLs Using the REST API.
\nIf the bucket that you're uploading objects to uses the bucket owner enforced setting\n for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that\n use this setting only accept PUT requests that don't specify an ACL or PUT requests that\n specify bucket owner full control ACLs, such as the bucket-owner-full-control
\n canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that\n contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a\n 400
error with the error code AccessControlListNotSupported
.\n For more information, see Controlling ownership of\n objects and disabling ACLs in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for Object Ownership, all\n objects written to the bucket by any account will be owned by the bucket owner.
\nBy default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.
\nIf you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID\n for the object being stored. Amazon S3 returns this ID in the response. When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all of the objects. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.
\nFor more information about related Amazon S3 APIs, see the following:
\n\n CopyObject\n
\n\n DeleteObject\n
\nAdds an object to a bucket. You must have WRITE permissions on a bucket to add an object\n to it.
\nAmazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject
to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. To prevent objects from\n being deleted or overwritten, you can use Amazon S3 Object\n Lock.
\nTo ensure that data is not corrupted traversing the network, use the\n Content-MD5
header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, returns an error. Additionally,\n you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.
To successfully complete the PutObject
request, you must have the\n s3:PutObject
in your IAM permissions.
To successfully change the objects acl of your PutObject
request,\n you must have the s3:PutObjectAcl
in your IAM permissions.
To successfully set the tag-set with your PutObject
request, you\n must have the s3:PutObjectTagging
in your IAM permissions.
The Content-MD5
header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information about Amazon S3 Object Lock, see Amazon S3 Object Lock\n Overview in the Amazon S3 User Guide.
You have four mutually exclusive options to protect data using server-side encryption in\n Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the\n encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or\n DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side\n encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to\n encrypt data at rest by using server-side encryption with other key options. For more\n information, see Using Server-Side\n Encryption.
\nWhen adding a new object, you can use headers to grant ACL-based permissions to\n individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are\n then added to the ACL on the object. By default, all objects are private. Only the owner\n has full access control. For more information, see Access Control List (ACL) Overview\n and Managing\n ACLs Using the REST API.
\nIf the bucket that you're uploading objects to uses the bucket owner enforced setting\n for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that\n use this setting only accept PUT requests that don't specify an ACL or PUT requests that\n specify bucket owner full control ACLs, such as the bucket-owner-full-control
\n canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that\n contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a\n 400
error with the error code AccessControlListNotSupported
.\n For more information, see Controlling ownership of\n objects and disabling ACLs in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for Object Ownership, all\n objects written to the bucket by any account will be owned by the bucket owner.
\nBy default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.
\nIf you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID\n for the object being stored. Amazon S3 returns this ID in the response. When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all of the objects. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.
\nFor more information about related Amazon S3 APIs, see the following:
\n\n CopyObject\n
\n\n DeleteObject\n
\nUses the acl
subresource to set the access control list (ACL) permissions\n for a new or existing object in an S3 bucket. You must have WRITE_ACP
\n permission to set the ACL of an object. For more information, see What\n permissions can I grant? in the Amazon S3 User Guide.
This action is not supported by Amazon S3 on Outposts.
\nDepending on your application needs, you can choose to set the ACL on an object using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, you can continue to use that approach.\n For more information, see Access Control List (ACL) Overview\n in the Amazon S3 User Guide.
\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported
error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.
You can set access permissions using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set\n of grantees and permissions. Specify the canned ACL name as the value of\n x-amz-ac
l. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned\n ACL.
Specify access permissions explicitly with the x-amz-grant-read
,\n x-amz-grant-read-acp
, x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers, you\n specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n x-amz-acl
header to set a canned ACL. These parameters map to the set\n of permissions that Amazon S3 supports in an ACL. For more information, see Access Control\n List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an\n Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants list\n objects permission to the two Amazon Web Services accounts identified by their email\n addresses.
\n x-amz-grant-read: emailAddress=\"xyz@amazon.com\",\n emailAddress=\"abc@amazon.com\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request.
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nThe ACL of an object is set at the object version level. By default, PUT sets the ACL of\n the current version of an object. To set the ACL of a different version, use the\n versionId
subresource.
The following operations are related to PutObjectAcl
:
\n CopyObject\n
\n\n GetObject\n
\nThe server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
If x-amz-server-side-encryption
is has a valid value of\n aws:kms
, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.
If x-amz-server-side-encryption
has a valid value of aws:kms
\n or aws:kms:dsse
, this header specifies the ID of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object.
Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption\n with Amazon Web Services KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -31543,7 +33782,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
If x-amz-server-side-encryption
has a valid value of aws:kms
,\n this header specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key that was used for the object. If you specify\n x-amz-server-side-encryption:aws:kms
, but do not provide\n x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key to\n protect the data. If the KMS key does not exist in the same account issuing the command,\n you must use the full ARN and not just the ID.
If x-amz-server-side-encryption
has a valid value of aws:kms
\n or aws:kms:dsse
, this header specifies the ID of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object. If you specify\n x-amz-server-side-encryption:aws:kms
or\n x-amz-server-side-encryption:aws:kms:dsse
, but do not provide\n x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3
) to protect the data. If the KMS key does not exist in the same\n account that's issuing the command, you must use the full ARN and not just the ID.
Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
\n causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a PUT action doesn’t affect bucket-level settings for S3\n Bucket Key.
", + "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true
causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.
Specifying this header with a PUT action doesn’t affect bucket-level settings for S3\n Bucket Key.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -31776,6 +34015,31 @@ "requestChecksumRequired": true }, "smithy.api#documentation": "Sets the supplied tag-set to an object that already exists in a bucket.
\nA tag is a key-value pair. You can associate tags with an object by sending a PUT\n request against the tagging subresource that is associated with the object. You can\n retrieve tags by sending a GET request. For more information, see GetObjectTagging.
\nFor tagging-related restrictions related to characters and encodings, see Tag\n Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per\n object.
\nTo use this operation, you must have permission to perform the\n s3:PutObjectTagging
action. By default, the bucket owner has this\n permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You\n also need permission for the s3:PutObjectVersionTagging
action.
For information about the Amazon S3 object tagging feature, see Object Tagging.
\n\n PutObjectTagging
has the following special errors:
\n Code: InvalidTagError \n
\n\n Cause: The tag provided was not a valid tag. This error can occur\n if the tag did not pass input validation. For more information, see Object\n Tagging.\n
\n\n Code: MalformedXMLError \n
\n\n Cause: The XML provided does not match the schema.\n
\n\n Code: OperationAbortedError \n
\n\n Cause: A conflicting conditional action is currently in progress\n against this resource. Please try again.\n
\n\n Code: InternalError\n
\n\n Cause: The service was unable to apply the provided tag to the\n object.\n
\nThe following operations are related to PutObjectTagging
:
\n GetObjectTagging\n
\n\n DeleteObjectTagging\n
\nRestores an archived copy of an object back into Amazon S3
\nThis action is not supported by Amazon S3 on Outposts.
\nThis action performs the following types of requests:
\n\n select
- Perform a select query on an archived object
\n restore an archive
- Restore an archived object
For more information about the S3
structure in the request body, see the\n following:
\n PutObject\n
\n\n Managing Access with ACLs in the\n Amazon S3 User Guide\n
\n\n Protecting Data Using\n Server-Side Encryption in the\n Amazon S3 User Guide\n
\nDefine the SQL expression for the SELECT
type of restoration for your\n query in the request body's SelectParameters
structure. You can use\n expressions like the following examples.
The following expression returns all records from the specified\n object.
\n\n SELECT * FROM Object
\n
Assuming that you are not using any headers for data stored in the object,\n you can specify columns with positional headers.
\n\n SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
\n
If you have headers and you set the fileHeaderInfo
in the\n CSV
structure in the request body to USE
, you can\n specify headers in the query. (If you set the fileHeaderInfo
field\n to IGNORE
, the first row is skipped for the query.) You cannot mix\n ordinal positions with header column names.
\n SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
\n
When making a select request, you can also do the following:
\nTo expedite your queries, specify the Expedited
tier. For more\n information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that\n is being queried and the serialization of the CSV-encoded query results.
\nThe following are additional important facts about the select feature:
\nThe output results are new Amazon S3 objects. Unlike archive retrievals, they are\n stored until explicitly deleted-manually or through a lifecycle configuration.
\nYou can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't\n duplicate requests, so avoid issuing duplicate requests.
\n Amazon S3 accepts a select request even if the object has already been restored. A\n select request doesn’t return error response 409
.
To use this operation, you must have permissions to perform the\n s3:RestoreObject
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval or\n S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage\n classes, you must first initiate a restore request, and then wait until a temporary copy of\n the object is available. If you want a permanent copy of the object, create a copy of it in\n the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must\n restore the object for the duration (number of days) that you specify. For objects in the\n Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first\n initiate a restore request, and then wait until the object is moved into the Frequent\n Access tier.
\nTo restore a specific object version, you can provide a version ID. If you don't provide\n a version ID, Amazon S3 restores the current version.
\nWhen restoring an archived object, you can specify one of the following data access tier\n options in the Tier
element of the request body:
\n Expedited
- Expedited retrievals allow you to quickly access your\n data stored in the S3 Glacier Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives\n are required. For all but the largest archived objects (250 MB+), data accessed using\n Expedited retrievals is typically made available within 1–5 minutes. Provisioned\n capacity ensures that retrieval capacity for Expedited retrievals is available when\n you need it. Expedited retrievals and provisioned capacity are not available for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.
\n Standard
- Standard retrievals allow you to access any of your\n archived objects within several hours. This is the default option for retrieval\n requests that do not specify the retrieval option. Standard retrievals typically\n finish within 3–5 hours for objects stored in the S3 Glacier Flexible\n Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within\n 12 hours for objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in\n S3 Intelligent-Tiering.
\n Bulk
- Bulk retrievals free for objects stored in the S3 Glacier\n Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to\n retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically\n finish within 5–12 hours for objects stored in the S3 Glacier\n Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are\n also the lowest-cost retrieval option when restoring objects from\n S3 Glacier Deep Archive. They typically finish within 48 hours for objects\n stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive\n tier.
For more information about archive retrieval options and provisioned capacity for\n Expedited
data access, see Restoring Archived Objects in\n the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed\n while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.
\nTo get the status of object restoration, you can send a HEAD
request.\n Operations return the x-amz-restore
header, which provides information about\n the restoration status, in the response. You can use Amazon S3 event notifications to notify you\n when a restore is initiated or completed. For more information, see Configuring Amazon S3\n Event Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing\n the request with a new period. Amazon S3 updates the restoration period relative to the current\n time and charges only for the request-there are no data transfer charges. You cannot\n update the restoration period when Amazon S3 is actively processing your current restore request\n for the object.
\nIf your bucket has a lifecycle configuration with a rule that includes an expiration\n action, the object expiration overrides the life span that you specify in a restore\n request. For example, if you restore an object copy for 10 days, but the object is\n scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information\n about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management\n in Amazon S3 User Guide.
\nA successful action returns either the 200 OK
or 202 Accepted
\n status code.
If the object is not previously restored, then Amazon S3 returns 202\n Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the\n response.
Special errors:
\n\n Code: RestoreAlreadyInProgress\n
\n\n Cause: Object restore is already in progress. (This error does not\n apply to SELECT type requests.)\n
\n\n HTTP Status Code: 409 Conflict\n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Code: GlacierExpeditedRetrievalNotAvailable\n
\n\n Cause: expedited retrievals are currently not available. Try again\n later. (Returned if there is insufficient capacity to process the Expedited\n request. This error applies only to Expedited retrievals and not to\n S3 Standard or Bulk retrievals.)\n
\n\n HTTP Status Code: 503\n
\n\n SOAP Fault Code Prefix: N/A\n
\nThe following operations are related to RestoreObject
:
Restores an archived copy of an object back into Amazon S3
\nThis action is not supported by Amazon S3 on Outposts.
\nThis action performs the following types of requests:
\n\n select
- Perform a select query on an archived object
\n restore an archive
- Restore an archived object
For more information about the S3
structure in the request body, see the\n following:
\n PutObject\n
\n\n Managing Access with ACLs in the\n Amazon S3 User Guide\n
\n\n Protecting Data Using\n Server-Side Encryption in the\n Amazon S3 User Guide\n
\nDefine the SQL expression for the SELECT
type of restoration for your\n query in the request body's SelectParameters
structure. You can use\n expressions like the following examples.
The following expression returns all records from the specified\n object.
\n\n SELECT * FROM Object
\n
Assuming that you are not using any headers for data stored in the object,\n you can specify columns with positional headers.
\n\n SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
\n
If you have headers and you set the fileHeaderInfo
in the\n CSV
structure in the request body to USE
, you can\n specify headers in the query. (If you set the fileHeaderInfo
field\n to IGNORE
, the first row is skipped for the query.) You cannot mix\n ordinal positions with header column names.
\n SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
\n
When making a select request, you can also do the following:
\nTo expedite your queries, specify the Expedited
tier. For more\n information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that\n is being queried and the serialization of the CSV-encoded query results.
\nThe following are additional important facts about the select feature:
\nThe output results are new Amazon S3 objects. Unlike archive retrievals, they are\n stored until explicitly deleted-manually or through a lifecycle configuration.
\nYou can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't\n duplicate requests, so avoid issuing duplicate requests.
\n Amazon S3 accepts a select request even if the object has already been restored. A\n select request doesn’t return error response 409
.
To use this operation, you must have permissions to perform the\n s3:RestoreObject
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval or\n S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage\n classes, you must first initiate a restore request, and then wait until a temporary copy of\n the object is available. If you want a permanent copy of the object, create a copy of it in\n the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must\n restore the object for the duration (number of days) that you specify. For objects in the\n Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first\n initiate a restore request, and then wait until the object is moved into the Frequent\n Access tier.
\nTo restore a specific object version, you can provide a version ID. If you don't provide\n a version ID, Amazon S3 restores the current version.
\nWhen restoring an archived object, you can specify one of the following data access tier\n options in the Tier
element of the request body:
\n Expedited
- Expedited retrievals allow you to quickly access your\n data stored in the S3 Glacier Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives\n are required. For all but the largest archived objects (250 MB+), data accessed using\n Expedited retrievals is typically made available within 1–5 minutes. Provisioned\n capacity ensures that retrieval capacity for Expedited retrievals is available when\n you need it. Expedited retrievals and provisioned capacity are not available for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.
\n Standard
- Standard retrievals allow you to access any of your\n archived objects within several hours. This is the default option for retrieval\n requests that do not specify the retrieval option. Standard retrievals typically\n finish within 3–5 hours for objects stored in the S3 Glacier Flexible\n Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within\n 12 hours for objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in\n S3 Intelligent-Tiering.
\n Bulk
- Bulk retrievals are free for objects stored in the S3 Glacier\n Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to\n retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically\n finish within 5–12 hours for objects stored in the S3 Glacier\n Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are\n also the lowest-cost retrieval option when restoring objects from\n S3 Glacier Deep Archive. They typically finish within 48 hours for objects\n stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive\n tier.
For more information about archive retrieval options and provisioned capacity for\n Expedited
data access, see Restoring Archived Objects in\n the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed\n while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.
\nTo get the status of object restoration, you can send a HEAD
request.\n Operations return the x-amz-restore
header, which provides information about\n the restoration status, in the response. You can use Amazon S3 event notifications to notify you\n when a restore is initiated or completed. For more information, see Configuring Amazon S3\n Event Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing\n the request with a new period. Amazon S3 updates the restoration period relative to the current\n time and charges only for the request-there are no data transfer charges. You cannot\n update the restoration period when Amazon S3 is actively processing your current restore request\n for the object.
\nIf your bucket has a lifecycle configuration with a rule that includes an expiration\n action, the object expiration overrides the life span that you specify in a restore\n request. For example, if you restore an object copy for 10 days, but the object is\n scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information\n about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management\n in Amazon S3 User Guide.
\nA successful action returns either the 200 OK
or 202 Accepted
\n status code.
If the object is not previously restored, then Amazon S3 returns 202\n Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the\n response.
Special errors:
\n\n Code: RestoreAlreadyInProgress\n
\n\n Cause: Object restore is already in progress. (This error does not\n apply to SELECT type requests.)\n
\n\n HTTP Status Code: 409 Conflict\n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Code: GlacierExpeditedRetrievalNotAvailable\n
\n\n Cause: expedited retrievals are currently not available. Try again\n later. (Returned if there is insufficient capacity to process the Expedited\n request. This error applies only to Expedited retrievals and not to\n S3 Standard or Bulk retrievals.)\n
\n\n HTTP Status Code: 503\n
\n\n SOAP Fault Code Prefix: N/A\n
\nThe following operations are related to RestoreObject
:
Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption\n customer managed key to use for encrypting inventory reports.
", + "smithy.api#documentation": "Specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key to use for\n encrypting inventory reports.
", "smithy.api#required": {} } } @@ -32808,7 +35089,7 @@ "target": "com.amazonaws.s3#SelectObjectContentOutput" }, "traits": { - "smithy.api#documentation": "This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.
\nThis action is not supported by Amazon S3 on Outposts.
\nFor more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.
\n \nYou must have s3:GetObject
permission for this operation. Amazon S3 Select does\n not support anonymous access. For more information about permissions, see Specifying\n Permissions in a Policy in the Amazon S3 User Guide.
You can use Amazon S3 Select to query objects that have the following format\n properties:
\n\n CSV, JSON, and Parquet - Objects must be in CSV, JSON, or\n Parquet format.
\n\n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.
\n\n GZIP or BZIP2 - CSV and JSON files can be compressed using\n GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select\n supports for CSV and JSON files. Amazon S3 Select supports columnar compression for\n Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression\n for Parquet objects.
\n\n Server-side encryption - Amazon S3 Select supports querying\n objects that are protected with server-side encryption.
\nFor objects that are encrypted with customer-provided encryption keys (SSE-C), you\n must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side\n Encryption (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.
\nFor objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys\n (SSE-KMS), server-side encryption is handled transparently, so you don't need to\n specify anything. For more information about server-side encryption, including SSE-S3\n and SSE-KMS, see Protecting Data Using\n Server-Side Encryption in the Amazon S3 User Guide.
\nGiven the response size is unknown, Amazon S3 Select streams the response as a series of\n messages and includes a Transfer-Encoding
header with chunked
as\n its value in the response. For more information, see Appendix: SelectObjectContent\n Response.
The SelectObjectContent
action does not support the following\n GetObject
functionality. For more information, see GetObject.
\n Range
: Although you can specify a scan range for an Amazon S3 Select request\n (see SelectObjectContentRequest - ScanRange in the request parameters),\n you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify\n the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY
storage classes. For\n more information, about storage classes see Storage\n Classes in the Amazon S3 User Guide.
For a list of special errors for this operation, see List of\n SELECT Object Content Error Codes\n
\nThe following operations are related to SelectObjectContent
:
\n GetObject\n
\nThis action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.
\nThis action is not supported by Amazon S3 on Outposts.
\nFor more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.
\n \nYou must have s3:GetObject
permission for this operation. Amazon S3 Select does\n not support anonymous access. For more information about permissions, see Specifying\n Permissions in a Policy in the Amazon S3 User Guide.
You can use Amazon S3 Select to query objects that have the following format\n properties:
\n\n CSV, JSON, and Parquet - Objects must be in CSV, JSON, or\n Parquet format.
\n\n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.
\n\n GZIP or BZIP2 - CSV and JSON files can be compressed using\n GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select\n supports for CSV and JSON files. Amazon S3 Select supports columnar compression for\n Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression\n for Parquet objects.
\n\n Server-side encryption - Amazon S3 Select supports querying\n objects that are protected with server-side encryption.
\nFor objects that are encrypted with customer-provided encryption keys (SSE-C), you\n must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side\n Encryption (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.
\nFor objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys\n (SSE-KMS), server-side encryption is handled transparently, so you don't need to\n specify anything. For more information about server-side encryption, including SSE-S3\n and SSE-KMS, see Protecting Data Using\n Server-Side Encryption in the Amazon S3 User Guide.
\nGiven the response size is unknown, Amazon S3 Select streams the response as a series of\n messages and includes a Transfer-Encoding
header with chunked
as\n its value in the response. For more information, see Appendix: SelectObjectContent\n Response.
The SelectObjectContent
action does not support the following\n GetObject
functionality. For more information, see GetObject.
\n Range
: Although you can specify a scan range for an Amazon S3 Select request\n (see SelectObjectContentRequest - ScanRange in the request parameters),\n you cannot specify the range of bytes of an object to return.
The GLACIER
, DEEP_ARCHIVE
, and REDUCED_REDUNDANCY
storage classes, or the ARCHIVE_ACCESS
and \n DEEP_ARCHIVE_ACCESS
access tiers of \n the INTELLIGENT_TIERING
storage class: You cannot query objects in \n the GLACIER
, DEEP_ARCHIVE
, or REDUCED_REDUNDANCY
storage classes, nor objects in the \n ARCHIVE_ACCESS
or \n DEEP_ARCHIVE_ACCESS
access tiers of \n the INTELLIGENT_TIERING
storage class. For\n more information about storage classes, see Using Amazon S3 storage\n classes in the Amazon S3 User Guide.
For a list of special errors for this operation, see List of\n SELECT Object Content Error Codes\n
\nThe following operations are related to SelectObjectContent
:
\n GetObject\n
\nThe server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
).
If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -33723,7 +36010,7 @@ "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Amazon Web Services KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -33893,7 +36180,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms
).
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
).
If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -33957,7 +36244,7 @@ "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Amazon Web Services KMS (SSE-KMS).
", + "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, diff --git a/aws/sdk/aws-models/sdk-endpoints.json b/aws/sdk/aws-models/sdk-endpoints.json index b94964b8f81f5fc34c14a222f447999fd11b3311..b74e1ac35da289eccee3be73b72bdf1675ba3e57 100644 --- a/aws/sdk/aws-models/sdk-endpoints.json +++ b/aws/sdk/aws-models/sdk-endpoints.json @@ -1076,6 +1076,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -2212,6 +2213,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -2836,6 +2838,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -3211,6 +3214,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -3271,6 +3275,13 @@ "deprecated" : true, "hostname" : "cognito-identity-fips.us-east-2.amazonaws.com" }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "cognito-identity-fips.us-west-1.amazonaws.com" + }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -3292,7 +3303,12 @@ "tags" : [ "fips" ] } ] }, - "us-west-1" : { }, + "us-west-1" : { + "variants" : [ { + "hostname" : "cognito-identity-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "us-west-2" : { "variants" : [ { "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com", @@ -4250,6 +4266,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "datasync-fips.ca-central-1.amazonaws.com", @@ -6612,6 +6629,7 @@ } ] }, "ap-southeast-3" : { }, + 
"ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "fms-fips.ca-central-1.amazonaws.com", @@ -6945,6 +6963,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "fsx-fips.ca-central-1.amazonaws.com", @@ -7955,11 +7974,6 @@ } }, "iot" : { - "defaults" : { - "credentialScope" : { - "service" : "execute-api" - } - }, "endpoints" : { "ap-east-1" : { }, "ap-northeast-1" : { }, @@ -7979,37 +7993,22 @@ "eu-west-2" : { }, "eu-west-3" : { }, "fips-ca-central-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.ca-central-1.amazonaws.com" }, "fips-us-east-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-east-1.amazonaws.com" }, "fips-us-east-2" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-east-2.amazonaws.com" }, "fips-us-west-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-west-1.amazonaws.com" }, "fips-us-west-2" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-west-2.amazonaws.com" }, @@ -8529,6 +8528,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "kafka-fips.ca-central-1.amazonaws.com", @@ -8635,6 +8635,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -8720,7 +8721,11 @@ "hostname" : "kendra-ranking.ap-southeast-4.api.aws" }, "ca-central-1" : { - "hostname" : "kendra-ranking.ca-central-1.api.aws" + "hostname" : "kendra-ranking.ca-central-1.api.aws", + "variants" : [ { + "hostname" : "kendra-ranking-fips.ca-central-1.api.aws", + "tags" : [ "fips" 
] + } ] }, "eu-central-2" : { "hostname" : "kendra-ranking.eu-central-2.api.aws" @@ -8750,16 +8755,28 @@ "hostname" : "kendra-ranking.sa-east-1.api.aws" }, "us-east-1" : { - "hostname" : "kendra-ranking.us-east-1.api.aws" + "hostname" : "kendra-ranking.us-east-1.api.aws", + "variants" : [ { + "hostname" : "kendra-ranking-fips.us-east-1.api.aws", + "tags" : [ "fips" ] + } ] }, "us-east-2" : { - "hostname" : "kendra-ranking.us-east-2.api.aws" + "hostname" : "kendra-ranking.us-east-2.api.aws", + "variants" : [ { + "hostname" : "kendra-ranking-fips.us-east-2.api.aws", + "tags" : [ "fips" ] + } ] }, "us-west-1" : { "hostname" : "kendra-ranking.us-west-1.api.aws" }, "us-west-2" : { - "hostname" : "kendra-ranking.us-west-2.api.aws" + "hostname" : "kendra-ranking.us-west-2.api.aws", + "variants" : [ { + "hostname" : "kendra-ranking-fips.us-west-2.api.aws", + "tags" : [ "fips" ] + } ] } } }, @@ -10301,6 +10318,25 @@ "us-west-2" : { } } }, + "mediapackagev2" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "mediastore" : { "endpoints" : { "ap-northeast-1" : { }, @@ -10449,6 +10485,7 @@ "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -10477,13 +10514,17 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -13041,6 +13082,7 @@ "ap-southeast-1" : { 
}, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -13908,6 +13950,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -13977,6 +14020,8 @@ "securitylake" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, @@ -13985,6 +14030,7 @@ "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -16056,8 +16102,11 @@ }, "transcribestreaming" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, @@ -16145,6 +16194,7 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, @@ -16155,8 +16205,10 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -16283,6 +16335,37 @@ } } }, + "verifiedpermissions" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + 
}, "voice-chime" : { "endpoints" : { "ap-northeast-1" : { }, @@ -18234,11 +18317,6 @@ } }, "iot" : { - "defaults" : { - "credentialScope" : { - "service" : "execute-api" - } - }, "endpoints" : { "cn-north-1" : { }, "cn-northwest-1" : { } @@ -21028,23 +21106,12 @@ } }, "iot" : { - "defaults" : { - "credentialScope" : { - "service" : "execute-api" - } - }, "endpoints" : { "fips-us-gov-east-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-gov-east-1.amazonaws.com" }, "fips-us-gov-west-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-gov-west-1.amazonaws.com" }, @@ -21519,6 +21586,36 @@ "us-gov-west-1" : { } } }, + "mgn" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "mgn-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "mgn-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "mgn-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "mgn-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, "models.lex" : { "defaults" : { "credentialScope" : { @@ -22516,6 +22613,12 @@ } } }, + "simspaceweaver" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "sms" : { "endpoints" : { "fips-us-gov-east-1" : { @@ -23151,6 +23254,13 @@ }, "workspaces" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "workspaces-fips.us-gov-east-1.amazonaws.com" + }, "fips-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" @@ -23158,7 +23268,12 @@ "deprecated" : true, "hostname" : 
"workspaces-fips.us-gov-west-1.amazonaws.com" }, - "us-gov-east-1" : { }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "workspaces-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "us-gov-west-1" : { "variants" : [ { "hostname" : "workspaces-fips.us-gov-west-1.amazonaws.com", @@ -23331,6 +23446,12 @@ "us-iso-west-1" : { } } }, + "dlm" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "dms" : { "defaults" : { "variants" : [ { @@ -23659,7 +23780,8 @@ }, "route53resolver" : { "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "runtime.sagemaker" : { @@ -23760,7 +23882,8 @@ }, "tagging" : { "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "transcribe" : { @@ -24252,6 +24375,23 @@ "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { }, "services" : { } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "csp.hci.ic.gov", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "dnsSuffix" : "csp.hci.ic.gov", + "partition" : "aws-iso-f", + "partitionName" : "AWS ISOF", + "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { }, + "services" : { } } ], "version" : 3 } diff --git a/aws/sdk/aws-models/timestream-query.json b/aws/sdk/aws-models/timestream-query.json index 9545cf1b2b90c18c0a2a7867b13d2a98dca10d7a..29edeadd2a3d51040f0a0b96856091b722dd6140 100644 --- a/aws/sdk/aws-models/timestream-query.json +++ b/aws/sdk/aws-models/timestream-query.json @@ -2637,8 +2637,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -2650,8 +2650,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -2663,8 +2663,8 
@@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -2676,8 +2676,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -2689,8 +2689,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -2702,8 +2702,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -2715,8 +2715,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -2728,8 +2728,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -2741,8 +2741,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -2754,8 +2754,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -2767,8 +2767,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -2780,8 +2780,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -2793,8 +2804,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + 
"documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -2806,8 +2828,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -2819,8 +2852,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -2832,8 +2876,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -2845,8 +2889,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -2858,8 +2902,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -2870,8 +2914,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -2882,10 +2926,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + 
"documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/aws/sdk/aws-models/timestream-write.json b/aws/sdk/aws-models/timestream-write.json index 568ab520a0a074d1754baf246938fc2b3c7565a5..a3378d042b1415debc29ee6b52c40eb8e8d52b2f 100644 --- a/aws/sdk/aws-models/timestream-write.json +++ b/aws/sdk/aws-models/timestream-write.json @@ -236,13 +236,13 @@ "ReportConfiguration": { "target": "com.amazonaws.timestreamwrite#ReportConfiguration", "traits": { - "smithy.api#documentation": "Report configuration for a batch load task. This contains details about where error reports are stored.
" + "smithy.api#documentation": "Report configuration for a batch load task. This contains details about where error\n reports are stored.
" } }, "DataModelConfiguration": { "target": "com.amazonaws.timestreamwrite#DataModelConfiguration", "traits": { - "smithy.api#documentation": "Data model configuration for a batch load task. This contains details about where a data model for a batch load task is stored.
" + "smithy.api#documentation": "Data model configuration for a batch load task. This contains details about where a data\n model for a batch load task is stored.
" } }, "TargetDatabaseName": { @@ -376,7 +376,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": true }, - "smithy.api#documentation": "Creates a new Timestream batch load task. A batch load task processes data from\n a CSV source in an S3 location and writes to a Timestream table. A mapping from\n source to target is defined in a batch load task. Errors and events are written to a report\n at an S3 location. For the report, if the KMS key is not specified, the\n batch load task will be encrypted with a Timestream managed KMS key\n located in your account. For more information, see Amazon Web Services managed\n keys. Service quotas apply. For\n details, see code\n sample.
" + "smithy.api#documentation": "Creates a new Timestream batch load task. A batch load task processes data from\n a CSV source in an S3 location and writes to a Timestream table. A mapping from\n source to target is defined in a batch load task. Errors and events are written to a report\n at an S3 location. For the report, if the KMS key is not specified, the\n report will be encrypted with an S3 managed key when SSE_S3
is the option.\n Otherwise an error is thrown. For more information, see Amazon Web Services managed\n keys. Service quotas apply. For\n details, see code\n sample.
Contains properties to set on the table when enabling magnetic store writes.
" } + }, + "Schema": { + "target": "com.amazonaws.timestreamwrite#Schema", + "traits": { + "smithy.api#documentation": "The schema of the table.
" + } } }, "traits": { @@ -1709,7 +1715,7 @@ "Value": { "target": "com.amazonaws.timestreamwrite#StringValue2048", "traits": { - "smithy.api#documentation": "The value for the MeasureValue.
", + "smithy.api#documentation": "The value for the MeasureValue. For information, see Data\n types.
", "smithy.api#required": {} } }, @@ -1909,6 +1915,78 @@ } } }, + "com.amazonaws.timestreamwrite#PartitionKey": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.timestreamwrite#PartitionKeyType", + "traits": { + "smithy.api#documentation": "The type of the partition key. Options are DIMENSION (dimension key) and MEASURE\n (measure key).
", + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.timestreamwrite#SchemaName", + "traits": { + "smithy.api#documentation": "The name of the attribute used for a dimension key.
" + } + }, + "EnforcementInRecord": { + "target": "com.amazonaws.timestreamwrite#PartitionKeyEnforcementLevel", + "traits": { + "smithy.api#documentation": "The level of enforcement for the specification of a dimension key in ingested records.\n Options are REQUIRED (dimension key must be specified) and OPTIONAL (dimension key does not\n have to be specified).
" + } + } + }, + "traits": { + "smithy.api#documentation": "An attribute used in partitioning data in a table. A dimension key partitions data\n using the values of the dimension specified by the dimension-name as partition key, while a\n measure key partitions data using measure names (values of the 'measure_name' column).\n
" + } + }, + "com.amazonaws.timestreamwrite#PartitionKeyEnforcementLevel": { + "type": "enum", + "members": { + "REQUIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REQUIRED" + } + }, + "OPTIONAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OPTIONAL" + } + } + } + }, + "com.amazonaws.timestreamwrite#PartitionKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.timestreamwrite#PartitionKey" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.timestreamwrite#PartitionKeyType": { + "type": "enum", + "members": { + "DIMENSION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DIMENSION" + } + }, + "MEASURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEASURE" + } + } + } + }, "com.amazonaws.timestreamwrite#Record": { "type": "structure", "members": { @@ -1933,7 +2011,7 @@ "MeasureValueType": { "target": "com.amazonaws.timestreamwrite#MeasureValueType", "traits": { - "smithy.api#documentation": " Contains the data type of the measure value for the time-series data point. Default\n type is DOUBLE
.
Contains the data type of the measure value for the time-series data point. Default\n type is DOUBLE
. For more information, see Data\n types.
Report configuration for a batch load task. This contains details about where error reports are stored.
" + "smithy.api#documentation": "Report configuration for a batch load task. This contains details about where error\n reports are stored.
" } }, "com.amazonaws.timestreamwrite#ReportS3Configuration": { @@ -2336,6 +2414,20 @@ } } }, + "com.amazonaws.timestreamwrite#Schema": { + "type": "structure", + "members": { + "CompositePartitionKey": { + "target": "com.amazonaws.timestreamwrite#PartitionKeyList", + "traits": { + "smithy.api#documentation": "A non-empty list of partition keys defining the attributes used to partition the table\n data. The order of the list determines the partition hierarchy. The name and type of each\n partition key as well as the partition key order cannot be changed after the table is\n created. However, the enforcement level of each partition key can be changed.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A Schema specifies the expected data model of the table.
" + } + }, "com.amazonaws.timestreamwrite#SchemaName": { "type": "string", "traits": { @@ -2440,6 +2532,12 @@ "traits": { "smithy.api#documentation": "Contains properties to set on the table when enabling magnetic store writes.
" } + }, + "Schema": { + "target": "com.amazonaws.timestreamwrite#Schema", + "traits": { + "smithy.api#documentation": "The schema of the table.
" + } } }, "traits": { @@ -3085,9 +3183,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -3098,9 +3196,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -3111,9 +3209,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -3124,9 +3222,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -3137,9 +3235,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -3150,9 +3248,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -3163,9 +3261,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -3176,9 +3274,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -3189,9 +3287,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -3202,9 +3300,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -3215,9 +3313,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -3228,9 +3326,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + 
"documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -3241,9 +3350,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -3254,9 +3374,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -3267,9 +3398,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -3280,9 +3422,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -3293,9 +3435,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, 
"Endpoint": "https://example.com" } }, @@ -3307,8 +3449,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -3318,9 +3460,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -3330,11 +3472,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" @@ -3536,6 +3684,12 @@ "traits": { "smithy.api#documentation": "Contains properties to set on the table when enabling magnetic store writes.
" } + }, + "Schema": { + "target": "com.amazonaws.timestreamwrite#Schema", + "traits": { + "smithy.api#documentation": "The schema of the table.
" + } } }, "traits": {