diff --git a/.changes/1.35.96.json b/.changes/1.35.96.json new file mode 100644 index 0000000000..776885cb0a --- /dev/null +++ b/.changes/1.35.96.json @@ -0,0 +1,17 @@ +[ + { + "category": "``codebuild``", + "description": "AWS CodeBuild Now Supports BuildBatch in Reserved Capacity and Lambda", + "type": "api-change" + }, + { + "category": "``compute-optimizer``", + "description": "This release expands AWS Compute Optimizer rightsizing recommendation support for Amazon EC2 Auto Scaling groups to include those with scaling policies and multiple instance types.", + "type": "api-change" + }, + { + "category": "``fms``", + "description": "AWS Firewall Manager now lets you combine multiple resource tags using the logical AND operator or the logical OR operator.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.35.97.json b/.changes/1.35.97.json new file mode 100644 index 0000000000..db30622d86 --- /dev/null +++ b/.changes/1.35.97.json @@ -0,0 +1,17 @@ +[ + { + "category": "``redshift``", + "description": "Additions to the PubliclyAccessible and Encrypted parameters clarifying what the defaults are.", + "type": "api-change" + }, + { + "category": "``securitylake``", + "description": "Doc only update for ServiceName that fixes several customer-reported issues", + "type": "api-change" + }, + { + "category": "``sts``", + "description": "Fixed typos in the descriptions.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.35.98.json b/.changes/1.35.98.json new file mode 100644 index 0000000000..d5042c05fe --- /dev/null +++ b/.changes/1.35.98.json @@ -0,0 +1,27 @@ +[ + { + "category": "``artifact``", + "description": "Support resolving regional API calls to partition's leader region endpoint.", + "type": "api-change" + }, + { + "category": "``bedrock``", + "description": "With this release, Bedrock Evaluation will now support latency-optimized inference for foundation models.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Add support for DisconnectOnSessionTimeout flag in CreateClientVpnEndpoint and ModifyClientVpnEndpoint requests and DescribeClientVpnEndpoints responses", + "type": "api-change" + }, + { + "category": "``kafkaconnect``", + "description": "Support updating connector configuration via UpdateConnector API. Release Operations API to monitor the status of the connector operation.", + "type": "api-change" + }, + { + "category": "``transcribe``", + "description": "This update provides tagging support for Transcribe's Call Analytics Jobs and Call Analytics Categories.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.35.99.json b/.changes/1.35.99.json new file mode 100644 index 0000000000..b791ac137e --- /dev/null +++ b/.changes/1.35.99.json @@ -0,0 +1,12 @@ +[ + { + "category": "``gamelift``", + "description": "Amazon GameLift releases a new game session placement feature: PriorityConfigurationOverride. 
You can now override how a game session queue prioritizes placement locations for a single StartGameSessionPlacement request.", + "type": "api-change" + }, + { + "category": "``route53``", + "description": "Amazon Route 53 now supports the Mexico (Central) Region (mx-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.36.0.json b/.changes/1.36.0.json new file mode 100644 index 0000000000..8d00e4babb --- /dev/null +++ b/.changes/1.36.0.json @@ -0,0 +1,67 @@ +[ + { + "category": "``apigateway``", + "description": "Documentation updates for Amazon API Gateway", + "type": "api-change" + }, + { + "category": "``bedrock-agent-runtime``", + "description": "Now supports streaming for inline agents.", + "type": "api-change" + }, + { + "category": "``cognito-identity``", + "description": "corrects the dual-stack endpoint configuration", + "type": "api-change" + }, + { + "category": "``partnercentral-selling``", + "description": "Add Tagging support for ResourceSnapshotJob resources", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "This change enhances integrity protections for new SDK requests to S3. S3 SDKs now support the CRC64NVME checksum algorithm, full object checksums for multipart S3 objects, and new default integrity protections for S3 requests.", + "type": "api-change" + }, + { + "category": "``security-ir``", + "description": "Increase minimum length of Threat Actor IP 'userAgent' to 1.", + "type": "api-change" + }, + { + "category": "``sesv2``", + "description": "This release introduces a new recommendation in Virtual Deliverability Manager Advisor, which detects elevated complaint rates for customer sending identities.", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Added GeneralPurpose.4xlarge & GeneralPurpose.8xlarge ComputeTypes.", + "type": "api-change" + }, + { + "category": "``workspaces-thin-client``", + "description": "Mark type in MaintenanceWindow as required.", + "type": "api-change" + }, + { + "category": "AWSCRT", + "description": "Update awscrt version to 0.23.4", + "type": "enhancement" + }, + { + "category": "``s3``", + "description": "The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using the ``when_supported`` and ``when_required`` options - in code using the ``response_checksum_validation`` parameter for ``botocore.config.Config``, in the shared AWS config file using ``response_checksum_validation``, or as an env variable using ``AWS_RESPONSE_CHECKSUM_VALIDATION``.", + "type": "feature" + }, + { + "category": "``s3``", + "description": "Added support for the CRC64NVME checksum algorithm in the S3 client through the optional AWS CRT (``awscrt``) dependency.", + "type": "feature" + }, + { + "category": "``s3``", + "description": "S3 client behavior is updated to always calculate a CRC32 checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). 
Checksum behavior can be configured using ``when_supported`` and ``when_required`` options - in code using the ``request_checksum_calculation`` parameter for ``botocore.config.Config``, in the shared AWS config file using ``request_checksum_calculation``, or as an env variable using ``AWS_REQUEST_CHECKSUM_CALCULATION``. Note: Botocore will no longer automatically compute and populate the Content-MD5 header.", + "type": "feature" + } +] \ No newline at end of file diff --git a/.changes/1.36.1.json b/.changes/1.36.1.json new file mode 100644 index 0000000000..2e77650de0 --- /dev/null +++ b/.changes/1.36.1.json @@ -0,0 +1,12 @@ +[ + { + "category": "``ecs``", + "description": "The release addresses Amazon ECS documentation tickets.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Added support for ml.trn1.32xlarge instance type in Reserved Capacity Offering", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.36.10.json b/.changes/1.36.10.json new file mode 100644 index 0000000000..ff1f13c990 --- /dev/null +++ b/.changes/1.36.10.json @@ -0,0 +1,47 @@ +[ + { + "category": "``appstream``", + "description": "Add support for managing admin consent requirement on selected domains for OneDrive Storage Connectors in AppStream2.0.", + "type": "api-change" + }, + { + "category": "``bedrock-agent-runtime``", + "description": "Add a 'reason' field to InternalServerException", + "type": "api-change" + }, + { + "category": "``ecr``", + "description": "Temporarily updating dualstack endpoint support", + "type": "api-change" + }, + { + "category": "``ecr-public``", + "description": "Temporarily updating dualstack endpoint support", + "type": "api-change" + }, + { + "category": "``mediatailor``", + "description": "Adds options for configuring how MediaTailor conditions ads before inserting them into the content stream. 
Based on the new settings, MediaTailor will either transcode ads to match the content stream as it has in the past, or it will insert ads without first transcoding them.", + "type": "api-change" + }, + { + "category": "``qbusiness``", + "description": "Added APIs to manage QBusiness user subscriptions", + "type": "api-change" + }, + { + "category": "``s3tables``", + "description": "You can now use the CreateTable API operation to create tables with schemas by adding an optional metadata argument.", + "type": "api-change" + }, + { + "category": "``verifiedpermissions``", + "description": "Adds Cedar JSON format support for entities and context data in authorization requests", + "type": "api-change" + }, + { + "category": "AWSCRT", + "description": "Update awscrt version to 0.23.8", + "type": "enhancement" + } +] \ No newline at end of file diff --git a/.changes/1.36.11.json b/.changes/1.36.11.json new file mode 100644 index 0000000000..40166a6a5c --- /dev/null +++ b/.changes/1.36.11.json @@ -0,0 +1,32 @@ +[ + { + "category": "``amp``", + "description": "Add support for sending metrics to cross account and CMCK AMP workspaces through RoleConfiguration on Create/Update Scraper.", + "type": "api-change" + }, + { + "category": "``bedrock-agent-runtime``", + "description": "This change is to deprecate the existing citation field under RetrieveAndGenerateStream API response in lieu of GeneratedResponsePart and RetrievedReferences", + "type": "api-change" + }, + { + "category": "``codebuild``", + "description": "Added support for CodeBuild self-hosted Buildkite runner builds", + "type": "api-change" + }, + { + "category": "``geo-routes``", + "description": "The OptimizeWaypoints API now supports 50 waypoints per request (20 with constraints like AccessHours or AppointmentTime). It adds waypoint clustering via Clustering and ClusteringIndex for better optimization. 
Also, total distance validation is removed for greater flexibility.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Updates to Aurora MySQL and Aurora PostgreSQL API pages with instance log type in the create and modify DB Cluster.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "This release introduces a new valid value in InstanceType parameter: p5en.48xlarge, in ProductionVariant.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.36.2.json b/.changes/1.36.2.json new file mode 100644 index 0000000000..b39c72d4d2 --- /dev/null +++ b/.changes/1.36.2.json @@ -0,0 +1,27 @@ +[ + { + "category": "``bedrock-runtime``", + "description": "Allow hyphens in tool name for Converse and ConverseStream APIs", + "type": "api-change" + }, + { + "category": "``detective``", + "description": "Doc only update for Detective documentation.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Release u7i-6tb.112xlarge, u7i-8tb.112xlarge, u7inh-32tb.480xlarge, p5e.48xlarge, p5en.48xlarge, f2.12xlarge, f2.48xlarge, trn2.48xlarge instance types.", + "type": "api-change" + }, + { + "category": "``notifications``", + "description": "Added support for Managed Notifications, integration with AWS Organization and added aggregation summaries for Aggregate Notifications", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Correction of docs for \"Added support for ml.trn1.32xlarge instance type in Reserved Capacity Offering\"", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.36.3.json b/.changes/1.36.3.json new file mode 100644 index 0000000000..94029600af --- /dev/null +++ b/.changes/1.36.3.json @@ -0,0 +1,42 @@ +[ + { + "category": "``batch``", + "description": "Documentation-only update: clarified the description of the shareDecaySeconds parameter of the FairsharePolicy data type, clarified the description of the priority parameter of the JobQueueDetail data type.", + "type": "api-change" + }, + { + "category": "``cognito-idp``", + "description": "corrects the dual-stack endpoint configuration for cognitoidp", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Added DeleteContactFlowVersion API and the CAMPAIGN flow type", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "Increasing entryPoint in SparkSubmit to accept longer script paths. New limit is 4kb.", + "type": "api-change" + }, + { + "category": "``iotsitewise``", + "description": "AWS IoT SiteWise now supports ingestion and querying of Null (all data types) and NaN (double type) values of bad or uncertain data quality. New partial error handling prevents data loss during ingestion. Enabled by default for new customers; existing customers can opt-in.", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "Documentation-only update to address doc errors", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "Added `DigitGroupingStyle` in ThousandsSeparator to allow grouping by `LAKH`( Indian Grouping system ) currency. Support LAKH and `CRORE` currency types in Column Formatting.", + "type": "api-change" + }, + { + "category": "``sns``", + "description": "This release adds support for the topic attribute FifoThroughputScope for SNS FIFO topics. 
For details, see the documentation history in the Amazon Simple Notification Service Developer Guide.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.36.4.json b/.changes/1.36.4.json new file mode 100644 index 0000000000..9142e622e6 --- /dev/null +++ b/.changes/1.36.4.json @@ -0,0 +1,22 @@ +[ + { + "category": "``bedrock-agent-runtime``", + "description": "Adds multi-turn input support for an Agent node in an Amazon Bedrock Flow", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Docs Update for timeout changes", + "type": "api-change" + }, + { + "category": "``medialive``", + "description": "AWS Elemental MediaLive adds a new feature, ID3 segment tagging, in CMAF Ingest output groups. It allows customers to insert ID3 tags into every output segment, controlled by a newly added channel schedule action Id3SegmentTagging.", + "type": "api-change" + }, + { + "category": "``workspaces-thin-client``", + "description": "Rename WorkSpaces Web to WorkSpaces Secure Browser", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.36.5.json b/.changes/1.36.5.json new file mode 100644 index 0000000000..80c9496ad9 --- /dev/null +++ b/.changes/1.36.5.json @@ -0,0 +1,7 @@ +[ + { + "category": "``ec2``", + "description": "Added \"future\" allocation type for future dated capacity reservation", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.36.6.json b/.changes/1.36.6.json new file mode 100644 index 0000000000..e57e3527f4 --- /dev/null +++ b/.changes/1.36.6.json @@ -0,0 +1,32 @@ +[ + { + "category": "``cloudtrail``", + "description": "This release introduces the SearchSampleQueries API that allows users to search for CloudTrail Lake sample queries.", + "type": "api-change" + }, + { + "category": "``eks``", + "description": "Adds support for UpdateStrategies in EKS Managed Node Groups.", + "type": "api-change" + }, + { + "category": "``healthlake``", + "description": "Added new authorization strategy value 'SMART_ON_FHIR' for CreateFHIRDatastore API to support Smart App 2.0", + "type": "api-change" + }, + { + "category": "``ssm``", + "description": "Systems Manager doc-only update for January, 2025.", + "type": "api-change" + }, + { + "category": "``sso-oidc``", + "description": "Fixed typos in the descriptions.", + "type": "api-change" + }, + { + "category": "``transfer``", + "description": "Added CustomDirectories as a new directory option for storing inbound AS2 messages, MDN files and Status files.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.36.7.json b/.changes/1.36.7.json new file mode 100644 index 0000000000..e3b00f53b0 --- /dev/null +++ b/.changes/1.36.7.json @@ -0,0 +1,27 @@ +[ + { + "category": "``bedrock-agent``", + "description": "Add support for the prompt caching feature for Bedrock Prompt Management", + "type": "api-change" + }, + { + "category": "``iot``", + "description": "Raised the documentParameters size limit to 30 KB for AWS IoT Device Management - Jobs.", + "type": "api-change" + }, + { + "category": "``mediaconvert``", + "description": "This release adds support for dynamic audio configuration and the ability to disable the deblocking filter for h265 encodes.", + "type": "api-change" + }, + { + "category": "``s3control``", + "description": "Minor fix to ARN validation for Lambda functions passed to S3 Batch Operations", + "type": "api-change" + }, + { + "category": "Signing", + "description": "No longer sign 
transfer-encoding header for SigV4", + "type": "bugfix" + } +] \ No newline at end of file diff --git a/.changes/1.36.8.json b/.changes/1.36.8.json new file mode 100644 index 0000000000..e68e5acbb2 --- /dev/null +++ b/.changes/1.36.8.json @@ -0,0 +1,32 @@ +[ + { + "category": "``appsync``", + "description": "Add stash and outErrors to EvaluateCode/EvaluateMappingTemplate response", + "type": "api-change" + }, + { + "category": "``datasync``", + "description": "AWS DataSync now supports the Kerberos authentication protocol for SMB locations.", + "type": "api-change" + }, + { + "category": "``deadline``", + "description": "feature: Deadline: Add support for limiting the concurrent usage of external resources, like floating licenses, using limits and the ability to constrain the maximum number of workers that work on a job", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release changes the CreateFleet CLI and SDK's such that if you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "For AppendOnly streams, Firehose will automatically scale to match your throughput.", + "type": "api-change" + }, + { + "category": "``timestream-influxdb``", + "description": "Adds 'allocatedStorage' parameter to UpdateDbInstance API that allows increasing the database instance storage size and 'dbStorageType' parameter to UpdateDbInstance API that allows changing the storage type of the database instance", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/1.36.9.json b/.changes/1.36.9.json new file mode 100644 index 0000000000..b24ad64671 --- /dev/null +++ b/.changes/1.36.9.json @@ -0,0 +1,27 @@ +[ + { + "category": "``bcm-pricing-calculator``", + "description": "Added ConflictException error type in DeleteBillScenario, BatchDeleteBillScenarioCommitmentModification, BatchDeleteBillScenarioUsageModification, BatchUpdateBillScenarioUsageModification, and BatchUpdateBillScenarioCommitmentModification API operations.", + "type": "api-change" + }, + { + "category": "``ecr``", + "description": "Add support for Dualstack and Dualstack-with-FIPS Endpoints", + "type": "api-change" + }, + { + "category": "``ecr-public``", + "description": "Add support for Dualstack Endpoints", + "type": "api-change" + }, + { + "category": "``mailmanager``", + "description": "This release includes a new feature for Amazon SES Mail Manager which allows customers to specify known addresses and domains and make use of those in traffic policies and rules actions to distinguish between known and unknown entries.", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "Change the type of MpuObjectSize in CompleteMultipartUploadRequest from int to long.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.github/workflows/closed-issue-message.yml b/.github/workflows/closed-issue-message.yml index fe492c73e8..ae4e7a3eca 100644 --- a/.github/workflows/closed-issue-message.yml +++ b/.github/workflows/closed-issue-message.yml @@ -12,7 +12,7 @@ jobs: permissions: issues: write steps: - - uses: aws-actions/closed-issue-message@37548691e7cc75ba58f85c9f873f9eee43590449 + - uses: aws-actions/closed-issue-message@10aaf6366131b673a7c8b7742f8b3849f1d44f18 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 
3c5061a1b4..4893c1ceee 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -23,13 +23,13 @@ jobs: uses: "actions/checkout@d632683dd7b4114ad314bca15554477dd762a938" - name: "Run CodeQL init" - uses: "github/codeql-action/init@662472033e021d55d94146f66f6058822b0b39fd" + uses: "github/codeql-action/init@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c" with: config-file: "./.github/codeql.yml" languages: "python" - name: "Run CodeQL autobuild" - uses: "github/codeql-action/autobuild@662472033e021d55d94146f66f6058822b0b39fd" + uses: "github/codeql-action/autobuild@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c" - name: "Run CodeQL analyze" - uses: "github/codeql-action/analyze@662472033e021d55d94146f66f6058822b0b39fd" + uses: "github/codeql-action/analyze@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 0103b91748..581dd47e5f 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -15,7 +15,7 @@ jobs: steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - name: Set up Python 3.9 - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b + uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 with: python-version: 3.9 - name: Run pre-commit diff --git a/.github/workflows/run-crt-test.yml b/.github/workflows/run-crt-test.yml index ee2d0ca9ac..1b74415c2e 100644 --- a/.github/workflows/run-crt-test.yml +++ b/.github/workflows/run-crt-test.yml @@ -20,7 +20,7 @@ jobs: steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - name: 'Set up Python ${{ matrix.python-version }}' - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b + uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 with: python-version: '${{ matrix.python-version }}' cache: 'pip' diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 8a90a18ecf..9bd4eb81ee 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -20,7 +20,7 @@ jobs: steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - name: 'Set up Python ${{ matrix.python-version }}' - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b + uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 with: python-version: '${{ matrix.python-version }}' cache: 'pip' @@ -32,7 +32,7 @@ jobs: run: | python scripts/ci/run-tests --with-cov --with-xdist - name: Run codecov - uses: codecov/codecov-action@7f8b4b4bde536c465e797be725718b88c5d95e0e + uses: codecov/codecov-action@13ce06bfc6bbe3ecf90edbbf1bc32fe5978ca1d3 with: directory: tests @@ -46,7 +46,7 @@ jobs: steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - name: 'Set up Python 3.10' - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b + uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 with: python-version: '3.10' - name: Install dependencies diff --git a/.github/workflows/stale_issue.yml b/.github/workflows/stale_issue.yml index 7789fe4889..33d2dc2d75 100644 --- a/.github/workflows/stale_issue.yml +++ b/.github/workflows/stale_issue.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest name: Stale issue job steps: - - uses: aws-actions/stale-issue-cleanup@413d85a1603df4f0a8158f5f3c8204ab1691313e + - uses: aws-actions/stale-issue-cleanup@58eca62c897621a4ad524b1612a982d02d6b0d3c with: issue-types: issues stale-issue-message: Greetings! 
It looks like this issue hasn’t been diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ddfcd1957b..4d7ad650d7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,169 @@ CHANGELOG ========= +1.36.11 +======= + +* api-change:``amp``: Add support for sending metrics to cross account and CMCK AMP workspaces through RoleConfiguration on Create/Update Scraper. +* api-change:``bedrock-agent-runtime``: This change is to deprecate the existing citation field under RetrieveAndGenerateStream API response in lieu of GeneratedResponsePart and RetrievedReferences +* api-change:``codebuild``: Added support for CodeBuild self-hosted Buildkite runner builds +* api-change:``geo-routes``: The OptimizeWaypoints API now supports 50 waypoints per request (20 with constraints like AccessHours or AppointmentTime). It adds waypoint clustering via Clustering and ClusteringIndex for better optimization. Also, total distance validation is removed for greater flexibility. +* api-change:``rds``: Updates to Aurora MySQL and Aurora PostgreSQL API pages with instance log type in the create and modify DB Cluster. +* api-change:``sagemaker``: This release introduces a new valid value in InstanceType parameter: p5en.48xlarge, in ProductionVariant. + + +1.36.10 +======= + +* api-change:``appstream``: Add support for managing admin consent requirement on selected domains for OneDrive Storage Connectors in AppStream2.0. +* api-change:``bedrock-agent-runtime``: Add a 'reason' field to InternalServerException +* api-change:``ecr``: Temporarily updating dualstack endpoint support +* api-change:``ecr-public``: Temporarily updating dualstack endpoint support +* api-change:``mediatailor``: Adds options for configuring how MediaTailor conditions ads before inserting them into the content stream. Based on the new settings, MediaTailor will either transcode ads to match the content stream as it has in the past, or it will insert ads without first transcoding them. +* api-change:``qbusiness``: Added APIs to manage QBusiness user subscriptions +* api-change:``s3tables``: You can now use the CreateTable API operation to create tables with schemas by adding an optional metadata argument. +* api-change:``verifiedpermissions``: Adds Cedar JSON format support for entities and context data in authorization requests +* enhancement:AWSCRT: Update awscrt version to 0.23.8 + + +1.36.9 +====== + +* api-change:``bcm-pricing-calculator``: Added ConflictException error type in DeleteBillScenario, BatchDeleteBillScenarioCommitmentModification, BatchDeleteBillScenarioUsageModification, BatchUpdateBillScenarioUsageModification, and BatchUpdateBillScenarioCommitmentModification API operations. +* api-change:``ecr``: Add support for Dualstack and Dualstack-with-FIPS Endpoints +* api-change:``ecr-public``: Add support for Dualstack Endpoints +* api-change:``mailmanager``: This release includes a new feature for Amazon SES Mail Manager which allows customers to specify known addresses and domains and make use of those in traffic policies and rules actions to distinguish between known and unknown entries. +* api-change:``s3``: Change the type of MpuObjectSize in CompleteMultipartUploadRequest from int to long. + + +1.36.8 +====== + +* api-change:``appsync``: Add stash and outErrors to EvaluateCode/EvaluateMappingTemplate response +* api-change:``datasync``: AWS DataSync now supports the Kerberos authentication protocol for SMB locations. 
+* api-change:``deadline``: feature: Deadline: Add support for limiting the concurrent usage of external resources, like floating licenses, using limits and the ability to constrain the maximum number of workers that work on a job +* api-change:``ec2``: This release changes the CreateFleet CLI and SDK's such that if you do not specify a client token, a randomly generated token is used for the request to ensure idempotency. +* api-change:``firehose``: For AppendOnly streams, Firehose will automatically scale to match your throughput. +* api-change:``timestream-influxdb``: Adds 'allocatedStorage' parameter to UpdateDbInstance API that allows increasing the database instance storage size and 'dbStorageType' parameter to UpdateDbInstance API that allows changing the storage type of the database instance + + +1.36.7 +====== + +* api-change:``bedrock-agent``: Add support for the prompt caching feature for Bedrock Prompt Management +* api-change:``iot``: Raised the documentParameters size limit to 30 KB for AWS IoT Device Management - Jobs. +* api-change:``mediaconvert``: This release adds support for dynamic audio configuration and the ability to disable the deblocking filter for h265 encodes. +* api-change:``s3control``: Minor fix to ARN validation for Lambda functions passed to S3 Batch Operations +* bugfix:Signing: No longer sign transfer-encoding header for SigV4 + + +1.36.6 +====== + +* api-change:``cloudtrail``: This release introduces the SearchSampleQueries API that allows users to search for CloudTrail Lake sample queries. +* api-change:``eks``: Adds support for UpdateStrategies in EKS Managed Node Groups. +* api-change:``healthlake``: Added new authorization strategy value 'SMART_ON_FHIR' for CreateFHIRDatastore API to support Smart App 2.0 +* api-change:``ssm``: Systems Manager doc-only update for January, 2025. +* api-change:``sso-oidc``: Fixed typos in the descriptions. +* api-change:``transfer``: Added CustomDirectories as a new directory option for storing inbound AS2 messages, MDN files and Status files. + + +1.36.5 +====== + +* api-change:``ec2``: Added "future" allocation type for future dated capacity reservation + + +1.36.4 +====== + +* api-change:``bedrock-agent-runtime``: Adds multi-turn input support for an Agent node in an Amazon Bedrock Flow +* api-change:``glue``: Docs Update for timeout changes +* api-change:``medialive``: AWS Elemental MediaLive adds a new feature, ID3 segment tagging, in CMAF Ingest output groups. It allows customers to insert ID3 tags into every output segment, controlled by a newly added channel schedule action Id3SegmentTagging. +* api-change:``workspaces-thin-client``: Rename WorkSpaces Web to WorkSpaces Secure Browser + + +1.36.3 +====== + +* api-change:``batch``: Documentation-only update: clarified the description of the shareDecaySeconds parameter of the FairsharePolicy data type, clarified the description of the priority parameter of the JobQueueDetail data type. +* api-change:``cognito-idp``: corrects the dual-stack endpoint configuration for cognitoidp +* api-change:``connect``: Added DeleteContactFlowVersion API and the CAMPAIGN flow type +* api-change:``emr-serverless``: Increasing entryPoint in SparkSubmit to accept longer script paths. New limit is 4kb. +* api-change:``iotsitewise``: AWS IoT SiteWise now supports ingestion and querying of Null (all data types) and NaN (double type) values of bad or uncertain data quality. New partial error handling prevents data loss during ingestion. 
Enabled by default for new customers; existing customers can opt-in. +* api-change:``logs``: Documentation-only update to address doc errors +* api-change:``quicksight``: Added `DigitGroupingStyle` in ThousandsSeparator to allow grouping by `LAKH`( Indian Grouping system ) currency. Support LAKH and `CRORE` currency types in Column Formatting. +* api-change:``sns``: This release adds support for the topic attribute FifoThroughputScope for SNS FIFO topics. For details, see the documentation history in the Amazon Simple Notification Service Developer Guide. + + +1.36.2 +====== + +* api-change:``bedrock-runtime``: Allow hyphens in tool name for Converse and ConverseStream APIs +* api-change:``detective``: Doc only update for Detective documentation. +* api-change:``ec2``: Release u7i-6tb.112xlarge, u7i-8tb.112xlarge, u7inh-32tb.480xlarge, p5e.48xlarge, p5en.48xlarge, f2.12xlarge, f2.48xlarge, trn2.48xlarge instance types. +* api-change:``notifications``: Added support for Managed Notifications, integration with AWS Organization and added aggregation summaries for Aggregate Notifications +* api-change:``sagemaker``: Correction of docs for "Added support for ml.trn1.32xlarge instance type in Reserved Capacity Offering" + + +1.36.1 +====== + +* api-change:``ecs``: The release addresses Amazon ECS documentation tickets. +* api-change:``sagemaker``: Added support for ml.trn1.32xlarge instance type in Reserved Capacity Offering + + +1.36.0 +====== + +* api-change:``apigateway``: Documentation updates for Amazon API Gateway +* api-change:``bedrock-agent-runtime``: Now supports streaming for inline agents. +* api-change:``cognito-identity``: corrects the dual-stack endpoint configuration +* api-change:``partnercentral-selling``: Add Tagging support for ResourceSnapshotJob resources +* api-change:``s3``: This change enhances integrity protections for new SDK requests to S3. S3 SDKs now support the CRC64NVME checksum algorithm, full object checksums for multipart S3 objects, and new default integrity protections for S3 requests. +* api-change:``security-ir``: Increase minimum length of Threat Actor IP 'userAgent' to 1. +* api-change:``sesv2``: This release introduces a new recommendation in Virtual Deliverability Manager Advisor, which detects elevated complaint rates for customer sending identities. +* api-change:``workspaces``: Added GeneralPurpose.4xlarge & GeneralPurpose.8xlarge ComputeTypes. +* api-change:``workspaces-thin-client``: Mark type in MaintenanceWindow as required. +* enhancement:AWSCRT: Update awscrt version to 0.23.4 +* feature:``s3``: The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using the ``when_supported`` and ``when_required`` options - in code using the ``response_checksum_validation`` parameter for ``botocore.config.Config``, in the shared AWS config file using ``response_checksum_validation``, or as an env variable using ``AWS_RESPONSE_CHECKSUM_VALIDATION``. +* feature:``s3``: Added support for the CRC64NVME checksum algorithm in the S3 client through the optional AWS CRT (``awscrt``) dependency. +* feature:``s3``: S3 client behavior is updated to always calculate a CRC32 checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). 
Checksum behavior can be configured using ``when_supported`` and ``when_required`` options - in code using the ``request_checksum_calculation`` parameter for ``botocore.config.Config``, in the shared AWS config file using ``request_checksum_calculation``, or as an env variable using ``AWS_REQUEST_CHECKSUM_CALCULATION``. Note: Botocore will no longer automatically compute and populate the Content-MD5 header. + + +1.35.99 +======= + +* api-change:``gamelift``: Amazon GameLift releases a new game session placement feature: PriorityConfigurationOverride. You can now override how a game session queue prioritizes placement locations for a single StartGameSessionPlacement request. +* api-change:``route53``: Amazon Route 53 now supports the Mexico (Central) Region (mx-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region + + +1.35.98 +======= + +* api-change:``artifact``: Support resolving regional API calls to partition's leader region endpoint. +* api-change:``bedrock``: With this release, Bedrock Evaluation will now support latency-optimized inference for foundation models. +* api-change:``ec2``: Add support for DisconnectOnSessionTimeout flag in CreateClientVpnEndpoint and ModifyClientVpnEndpoint requests and DescribeClientVpnEndpoints responses +* api-change:``kafkaconnect``: Support updating connector configuration via UpdateConnector API. Release Operations API to monitor the status of the connector operation. +* api-change:``transcribe``: This update provides tagging support for Transcribe's Call Analytics Jobs and Call Analytics Categories. + + +1.35.97 +======= + +* api-change:``redshift``: Additions to the PubliclyAccessible and Encrypted parameters clarifying what the defaults are. +* api-change:``securitylake``: Doc only update for ServiceName that fixes several customer-reported issues +* api-change:``sts``: Fixed typos in the descriptions. + + +1.35.96 +======= + +* api-change:``codebuild``: AWS CodeBuild Now Supports BuildBatch in Reserved Capacity and Lambda +* api-change:``compute-optimizer``: This release expands AWS Compute Optimizer rightsizing recommendation support for Amazon EC2 Auto Scaling groups to include those with scaling policies and multiple instance types. +* api-change:``fms``: AWS Firewall Manager now lets you combine multiple resource tags using the logical AND operator or the logical OR operator. + + 1.35.95 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index a17445c619..f909ebc376 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.95' +__version__ = '1.36.11' class NullHandler(logging.Handler): diff --git a/botocore/args.py b/botocore/args.py index aeb9ff4198..df08dd3cc9 100644 --- a/botocore/args.py +++ b/botocore/args.py @@ -61,6 +61,15 @@ # values result in a warning-level log message. 
USERAGENT_APPID_MAXLEN = 50 +VALID_REQUEST_CHECKSUM_CALCULATION_CONFIG = ( + "when_supported", + "when_required", +) +VALID_RESPONSE_CHECKSUM_VALIDATION_CONFIG = ( + "when_supported", + "when_required", +) + class ClientArgsCreator: def __init__( @@ -271,12 +280,19 @@ def compute_client_args( sigv4a_signing_region_set=( client_config.sigv4a_signing_region_set ), + request_checksum_calculation=( + client_config.request_checksum_calculation + ), + response_checksum_validation=( + client_config.response_checksum_validation + ), ) self._compute_retry_config(config_kwargs) self._compute_connect_timeout(config_kwargs) self._compute_user_agent_appid_config(config_kwargs) self._compute_request_compression_config(config_kwargs) self._compute_sigv4a_signing_region_set_config(config_kwargs) + self._compute_checksum_config(config_kwargs) s3_config = self.compute_s3_config(client_config) is_s3_service = self._is_s3_service(service_name) @@ -581,25 +597,24 @@ def _compute_request_compression_config(self, config_kwargs): def _validate_min_compression_size(self, min_size): min_allowed_min_size = 1 max_allowed_min_size = 1048576 - if min_size is not None: - error_msg_base = ( - f'Invalid value "{min_size}" for ' - 'request_min_compression_size_bytes.' + error_msg_base = ( + f'Invalid value "{min_size}" for ' + 'request_min_compression_size_bytes.' + ) + try: + min_size = int(min_size) + except (ValueError, TypeError): + msg = ( + f'{error_msg_base} Value must be an integer. ' + f'Received {type(min_size)} instead.' ) - try: - min_size = int(min_size) - except (ValueError, TypeError): - msg = ( - f'{error_msg_base} Value must be an integer. ' - f'Received {type(min_size)} instead.' - ) - raise botocore.exceptions.InvalidConfigError(error_msg=msg) - if not min_allowed_min_size <= min_size <= max_allowed_min_size: - msg = ( - f'{error_msg_base} Value must be between ' - f'{min_allowed_min_size} and {max_allowed_min_size}.' - ) - raise botocore.exceptions.InvalidConfigError(error_msg=msg) + raise botocore.exceptions.InvalidConfigError(error_msg=msg) + if not min_allowed_min_size <= min_size <= max_allowed_min_size: + msg = ( + f'{error_msg_base} Value must be between ' + f'{min_allowed_min_size} and {max_allowed_min_size}.' 
+ ) + raise botocore.exceptions.InvalidConfigError(error_msg=msg) return min_size @@ -782,3 +797,36 @@ def _compute_sigv4a_signing_region_set_config(self, config_kwargs): 'sigv4a_signing_region_set' ) config_kwargs['sigv4a_signing_region_set'] = sigv4a_signing_region_set + + def _compute_checksum_config(self, config_kwargs): + self._handle_checksum_config( + config_kwargs, + config_key="request_checksum_calculation", + valid_options=VALID_REQUEST_CHECKSUM_CALCULATION_CONFIG, + ) + self._handle_checksum_config( + config_kwargs, + config_key="response_checksum_validation", + valid_options=VALID_RESPONSE_CHECKSUM_VALIDATION_CONFIG, + ) + + def _handle_checksum_config( + self, + config_kwargs, + config_key, + valid_options, + ): + value = config_kwargs.get(config_key) + if value is None: + value = self._config_store.get_config_variable(config_key) + + if isinstance(value, str): + value = value.lower() + + if value not in valid_options: + raise botocore.exceptions.InvalidChecksumConfigError( + config_key=config_key, + config_value=value, + valid_options=valid_options, + ) + config_kwargs[config_key] = value diff --git a/botocore/auth.py b/botocore/auth.py index 66e605a665..bacbd39dde 100644 --- a/botocore/auth.py +++ b/botocore/auth.py @@ -65,6 +65,7 @@ SIGV4_TIMESTAMP = '%Y%m%dT%H%M%SZ' SIGNED_HEADERS_BLACKLIST = [ 'expect', + 'transfer-encoding', 'user-agent', 'x-amzn-trace-id', ] diff --git a/botocore/config.py b/botocore/config.py index eee55bb06d..0d20b69b8f 100644 --- a/botocore/config.py +++ b/botocore/config.py @@ -235,6 +235,37 @@ class Config: specified service will be ignored. Defaults to None. + + :type request_checksum_calculation: str + :param request_checksum_calculation: Determines when a checksum will be + calculated for request payloads. Valid values are: + + * ``when_supported`` -- When set, a checksum will be calculated for + all request payloads of operations modeled with the ``httpChecksum`` + trait where ``requestChecksumRequired`` is ``true`` or a + ``requestAlgorithmMember`` is modeled. + + * ``when_required`` -- When set, a checksum will only be calculated + for request payloads of operations modeled with the ``httpChecksum`` + trait where ``requestChecksumRequired`` is ``true`` or where a + ``requestAlgorithmMember`` is modeled and supplied. + + Defaults to None. + + :type response_checksum_validation: str + :param response_checksum_validation: Determines when checksum validation + will be performed on response payloads. Valid values are: + + * ``when_supported`` -- When set, checksum validation is performed on + all response payloads of operations modeled with the ``httpChecksum`` + trait where ``responseAlgorithms`` is modeled, except when no modeled + checksum algorithms are supported. + + * ``when_required`` -- When set, checksum validation is not performed + on response payloads of operations unless the checksum algorithm is + supported and the ``requestValidationModeMember`` member is set to ``ENABLED``. + + Defaults to None. 
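For example, a minimal sketch of opting both behaviors down to ``when_required`` on an S3 client (assuming credentials and a region are configured elsewhere)::

    import botocore.session
    from botocore.config import Config

    # Only calculate request checksums, and only validate response
    # checksums, when an operation requires it, instead of the
    # library's "when_supported" default.
    config = Config(
        request_checksum_calculation="when_required",
        response_checksum_validation="when_required",
    )

    session = botocore.session.get_session()
    client = session.create_client("s3", config=config)

The same settings can also come from the shared AWS config file (``request_checksum_calculation`` / ``response_checksum_validation``) or from the ``AWS_REQUEST_CHECKSUM_CALCULATION`` / ``AWS_RESPONSE_CHECKSUM_VALIDATION`` environment variables.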
""" OPTION_DEFAULTS = OrderedDict( @@ -264,6 +295,8 @@ class Config: ('disable_request_compression', None), ('client_context_params', None), ('sigv4a_signing_region_set', None), + ('request_checksum_calculation', None), + ('response_checksum_validation', None), ] ) diff --git a/botocore/configprovider.py b/botocore/configprovider.py index be5ce09004..5ebe85235d 100644 --- a/botocore/configprovider.py +++ b/botocore/configprovider.py @@ -168,6 +168,18 @@ None, None, ), + 'request_checksum_calculation': ( + 'request_checksum_calculation', + 'AWS_REQUEST_CHECKSUM_CALCULATION', + "when_supported", + None, + ), + 'response_checksum_validation': ( + 'response_checksum_validation', + 'AWS_RESPONSE_CHECKSUM_VALIDATION', + "when_supported", + None, + ), } # Evaluate AWS_STS_REGIONAL_ENDPOINTS settings @@ -468,7 +480,7 @@ def __copy__(self): def get_config_variable(self, logical_name): """ - Retrieve the value associeated with the specified logical_name + Retrieve the value associated with the specified logical_name from the corresponding provider. If no value is found None will be returned. diff --git a/botocore/data/amp/2020-08-01/service-2.json b/botocore/data/amp/2020-08-01/service-2.json index 2ba81375e7..beb8e97f39 100644 --- a/botocore/data/amp/2020-08-01/service-2.json +++ b/botocore/data/amp/2020-08-01/service-2.json @@ -800,6 +800,10 @@ "shape":"Destination", "documentation":"
The Amazon Managed Service for Prometheus workspace to send metrics to.
" }, + "roleConfiguration":{ + "shape":"RoleConfiguration", + "documentation":"The scraper role configuration for the workspace.
" + }, "scrapeConfiguration":{ "shape":"ScrapeConfiguration", "documentation":"The configuration file to use in the new scraper. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.
" @@ -1223,7 +1227,8 @@ }, "IamRoleArn":{ "type":"string", - "documentation":"An ARN identifying an IAM role used by the scraper.
" + "documentation":"An ARN identifying an IAM role used by the scraper.
", + "pattern":"^arn:aws[-a-z]*:iam::[0-9]{12}:role/.+$" }, "IdempotencyToken":{ "type":"string", @@ -1262,7 +1267,7 @@ "documentation":"A KMS Key ARN.
", "max":2048, "min":20, - "pattern":"^arn:aws:kms:[a-z0-9\\-]+:\\d+:key/[a-f0-9\\-]+$" + "pattern":"^arn:aws[-a-z]*:kms:[-a-z0-9]+:[0-9]{12}:key/[-a-f0-9]+$" }, "ListRuleGroupsNamespacesRequest":{ "type":"structure", @@ -1428,7 +1433,7 @@ }, "LogGroupArn":{ "type":"string", - "pattern":"^arn:aws[a-z0-9-]*:logs:[a-z0-9-]+:\\d{12}:log-group:[A-Za-z0-9\\.\\-\\_\\#/]{1,512}\\:\\*$" + "pattern":"^arn:aws[-a-z]*:logs:[-a-z0-9]+:[0-9]{12}:log-group:[A-Za-z0-9\\.\\-\\_\\#/]{1,512}\\:\\*$" }, "LoggingConfigurationMetadata":{ "type":"structure", @@ -1619,6 +1624,20 @@ }, "exception":true }, + "RoleConfiguration":{ + "type":"structure", + "members":{ + "sourceRoleArn":{ + "shape":"IamRoleArn", + "documentation":"A ARN identifying the source role configuration.
" + }, + "targetRoleArn":{ + "shape":"IamRoleArn", + "documentation":"A ARN identifying the target role configuration.
" + } + }, + "documentation":"To configure roles that allows users to write to an Amazon Managed Service for Prometheus workspace in a different account.
" + }, "RuleGroupsNamespaceArn":{ "type":"string", "documentation":"An ARN identifying a rule groups namespace.
" @@ -1805,6 +1824,7 @@ "shape":"IamRoleArn", "documentation":"The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.
For example, arn:aws:iam::123456789012:role/service-role/AmazonGrafanaServiceRole-12example
.
The configuration in use by the scraper.
" @@ -1914,6 +1934,7 @@ "shape":"IamRoleArn", "documentation":"The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.
" }, + "roleConfiguration":{"shape":"RoleConfiguration"}, "scraperId":{ "shape":"ScraperId", "documentation":"The ID of the scraper.
" @@ -2191,6 +2212,10 @@ "shape":"Destination", "documentation":"The new Amazon Managed Service for Prometheus workspace to send metrics to.
" }, + "roleConfiguration":{ + "shape":"RoleConfiguration", + "documentation":"The scraper role configuration for the workspace.
" + }, "scrapeConfiguration":{ "shape":"ScrapeConfiguration", "documentation":"Contains the base-64 encoded YAML configuration for the scraper.
For more information about configuring a scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide.
The identifier for the domain name resource. Supported only for private custom domain names.
", + "documentation":"The identifier for the domain name resource. Required for private custom domain names.
", "location":"querystring", "locationName":"domainNameId" }, @@ -4474,7 +4474,7 @@ }, "domainNameId":{ "shape":"String", - "documentation":"The identifier for the domain name resource. Supported only for private custom domain names.
", + "documentation":"The identifier for the domain name resource. Required for private custom domain names.
", "location":"querystring", "locationName":"domainNameId" } diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index 04e2309b13..0e18cdec88 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -5041,6 +5041,10 @@ "Domains":{ "shape":"DomainList", "documentation":"The names of the domains for the account.
" + }, + "DomainsRequireAdminConsent":{ + "shape":"DomainList", + "documentation":"The OneDrive for Business domains where you require admin consent when users try to link their OneDrive account to AppStream 2.0. The attribute can only be specified when ConnectorType=ONE_DRIVE.
" } }, "documentation":"Describes a connector that enables persistent storage for users.
" diff --git a/botocore/data/appsync/2017-07-25/service-2.json b/botocore/data/appsync/2017-07-25/service-2.json index dfc9fae8d0..96e18b162b 100644 --- a/botocore/data/appsync/2017-07-25/service-2.json +++ b/botocore/data/appsync/2017-07-25/service-2.json @@ -3189,6 +3189,14 @@ "logs":{ "shape":"Logs", "documentation":"A list of logs that were generated by calls to util.log.info
and util.log.error
in the evaluated code.
An object available inside each resolver and function handler. A single stash
object lives through a single resolver run. Therefore, you can use the stash to pass arbitrary data across request and response handlers and across functions in a pipeline resolver.
The list of runtime errors that are added to the GraphQL operation response.
" } } }, @@ -3223,6 +3231,14 @@ "logs":{ "shape":"Logs", "documentation":"A list of logs that were generated by calls to util.log.info
and util.log.error
in the evaluated code.
An object available inside each resolver and function handler. A single stash
object lives through a single resolver run. Therefore, you can use the stash to pass arbitrary data across request and response handlers and across functions in a pipeline resolver.
The list of runtime errors that are added to the GraphQL operation response.
" } } }, @@ -4645,6 +4661,10 @@ "DISABLED" ] }, + "OutErrors":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, "OutputType":{ "type":"string", "enum":[ @@ -5118,6 +5138,10 @@ } } }, + "Stash":{ + "type":"string", + "pattern":"^[\\s\\S]*$" + }, "String":{"type":"string"}, "SyncConfig":{ "type":"structure", diff --git a/botocore/data/artifact/2018-05-10/endpoint-rule-set-1.json b/botocore/data/artifact/2018-05-10/endpoint-rule-set-1.json index 49efffd757..5da6f43cf5 100644 --- a/botocore/data/artifact/2018-05-10/endpoint-rule-set-1.json +++ b/botocore/data/artifact/2018-05-10/endpoint-rule-set-1.json @@ -1,12 +1,6 @@ { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -26,6 +20,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -177,18 +177,19 @@ "rules": [ { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://artifact-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://artifact-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -211,6 +212,15 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } ], "rules": [ @@ -235,18 +245,19 @@ "rules": [ { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://artifact-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://artifact-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -261,6 +272,15 @@ }, { "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, { "fn": "booleanEquals", "argv": [ @@ -293,18 +313,19 @@ "rules": [ { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://artifact.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://artifact.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -319,18 +340,19 @@ }, { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://artifact.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": 
"https://artifact.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index 2f2550a1f2..5cc1f6cb65 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -2366,7 +2366,7 @@ "members":{ "shareDecaySeconds":{ "shape":"Integer", - "documentation":"The amount of time (in seconds) to use to calculate a fair share percentage for each fair share identifier in use. A value of zero (0) indicates that only current usage is measured. The decay allows for more recently run jobs to have more weight than jobs that ran earlier. The maximum supported value is 604800 (1 week).
" + "documentation":"The amount of time (in seconds) to use to calculate a fair share percentage for each fair share identifier in use. A value of zero (0) indicates the default minimum time window (600 seconds). The maximum supported value is 604800 (1 week).
The decay allows for more recently run jobs to have more weight than jobs that ran earlier. Consider adjusting this number if you have jobs that (on average) run longer than ten minutes, or a large difference in job count or job run times between share identifiers, and the allocation of resources doesn’t meet your needs.
" }, "computeReservation":{ "shape":"Integer", @@ -2779,7 +2779,7 @@ }, "priority":{ "shape":"Integer", - "documentation":"The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority
parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10
is given scheduling preference over a job queue with a priority value of 1
. All of the compute environments must be either Amazon EC2 (EC2
or SPOT
) or Fargate (FARGATE
or FARGATE_SPOT
). Amazon EC2 and Fargate compute environments can't be mixed.
The priority of the job queue. Job queue priority determines the order that job queues are evaluated when multiple queues dispatch jobs within a shared compute environment. A higher value for priority
indicates a higher priority. Queues are evaluated in cycles, in descending order by priority. For example, a job queue with a priority value of 10
is evaluated before a queue with a priority value of 1
. All of the compute environments must be either Amazon EC2 (EC2
or SPOT
) or Fargate (FARGATE
or FARGATE_SPOT
). Amazon EC2 and Fargate compute environments can't be mixed.
Job queue priority doesn't guarantee that a particular job executes before a job in a lower priority queue. Jobs added to higher priority queues during the queue evaluation cycle might not be evaluated until the next cycle. A job is dispatched from a queue only if resources are available when the queue is evaluated. If there are insufficient resources available at that time, the cycle proceeds to the next queue. This means that jobs added to higher priority queues might have to wait for jobs in multiple lower priority queues to complete before they are dispatched. You can use job dependencies to control the order for jobs from queues with different priorities. For more information, see Job Dependencies in the Batch User Guide.
The instance type or family that this this override launch template should be applied to.
This parameter is required when defining a launch template override.
Information included in this parameter must meet the following requirements:
Must be a valid Amazon EC2 instance type or family.
optimal
isn't allowed.
targetInstanceTypes
can target only instance types and families that are included within the ComputeResource.instanceTypes
set. targetInstanceTypes
doesn't need to include all of the instances from the instanceType
set, but at least a subset. For example, if ComputeResource.instanceTypes
includes [m5, g5]
, targetInstanceTypes
can include [m5.2xlarge]
and [m5.large]
but not [c5.large]
.
targetInstanceTypes
included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another define an instance type within this same family.
The instance type or family that this override launch template should be applied to.
This parameter is required when defining a launch template override.
Information included in this parameter must meet the following requirements:
Must be a valid Amazon EC2 instance type or family.
optimal
isn't allowed.
targetInstanceTypes
can target only instance types and families that are included within the ComputeResource.instanceTypes
set. targetInstanceTypes
doesn't need to include all of the instances from the instanceType
set, but at least a subset. For example, if ComputeResource.instanceTypes
includes [m5, g5]
, targetInstanceTypes
can include [m5.2xlarge]
and [m5.large]
but not [c5.large]
.
targetInstanceTypes
included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another to define an instance type within this same family.
An object that represents a launch template to use in place of the default launch template. You must specify either the launch template ID or launch template name in the request, but not both.
If security groups are specified using both the securityGroupIds
parameter of CreateComputeEnvironment
and the launch template, the values in the securityGroupIds
parameter of CreateComputeEnvironment
will be used.
You can define up to ten (10) overrides for each compute environment.
This object isn't applicable to jobs that are running on Fargate resources.
To unset all override templates for a compute environment, you can pass an empty array to the UpdateComputeEnvironment.overrides parameter, or not include the overrides
parameter when submitting the UpdateComputeEnvironment
API operation.
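A sketch of per-instance-family launch template overrides applied through UpdateComputeEnvironment; the compute environment name, launch template IDs, and override member names are assumptions drawn from the shapes described above, not a definitive call.

import boto3

batch = boto3.client("batch")

# One override per instance family listed in computeResources.instanceTypes;
# the families targeted by the overrides must not overlap.
batch.update_compute_environment(
    computeEnvironment="shared-ce",            # hypothetical name
    computeResources={
        "launchTemplate": {
            "launchTemplateId": "lt-0example0default",   # placeholder IDs
            "version": "$Default",
            "overrides": [
                {"launchTemplateId": "lt-0example1", "targetInstanceTypes": ["m5"]},
                {"launchTemplateId": "lt-0example2", "targetInstanceTypes": ["g5"]},
            ],
        },
    },
)

Passing "overrides": [] in a later update would unset all override templates, as noted above.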
The environment variables to pass to a container. This parameter maps to Env inthe Create a container section of the Docker Remote API and the --env
parameter to docker run.
We don't recommend using plaintext environment variables for sensitive information, such as credential data.
Environment variables cannot start with AWS_BATCH
. This naming convention is reserved for variables that Batch sets.
The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env
parameter to docker run.
We don't recommend using plaintext environment variables for sensitive information, such as credential data.
Environment variables cannot start with AWS_BATCH
. This naming convention is reserved for variables that Batch sets.
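A minimal sketch of passing container environment variables at job submission; job, queue, and definition names are placeholders. Only non-sensitive values are passed in plain text, and none are prefixed with AWS_BATCH.

import boto3

batch = boto3.client("batch")

batch.submit_job(
    jobName="example-job",                     # hypothetical identifiers
    jobQueue="high-priority-queue",
    jobDefinition="example-job-definition",
    containerOverrides={
        "environment": [
            {"name": "STAGE", "value": "test"},
            {"name": "LOG_LEVEL", "value": "debug"},
        ],
    },
)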
Create Compute Savings Plans, EC2 Instance Savings Plans, or EC2 Reserved Instances commitments that you want to model in a Bill Scenario.
", + "documentation":"Create Compute Savings Plans, EC2 Instance Savings Plans, or EC2 Reserved Instances commitments that you want to model in a Bill Scenario.
The BatchCreateBillScenarioCommitmentModification
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission bcm-pricing-calculator:CreateBillScenarioCommitmentModification
in your policies.
Create Amazon Web Services service usage that you want to model in a Bill Scenario.
", + "documentation":"Create Amazon Web Services service usage that you want to model in a Bill Scenario.
The BatchCreateBillScenarioUsageModification
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission bcm-pricing-calculator:CreateBillScenarioUsageModification
in your policies.
Create Amazon Web Services service usage that you want to model in a Workload Estimate.
", + "documentation":"Create Amazon Web Services service usage that you want to model in a Workload Estimate.
The BatchCreateWorkloadEstimateUsage
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission bcm-pricing-calculator:CreateWorkloadEstimateUsage
in your policies.
Delete commitment that you have created in a Bill Scenario. You can only delete a commitment that you had added and cannot model deletion (or removal) of a existing commitment. If you want model deletion of an existing commitment, see the negate BillScenarioCommitmentModificationAction of BatchCreateBillScenarioCommitmentModification operation.
", + "documentation":"Delete commitment that you have created in a Bill Scenario. You can only delete a commitment that you had added and cannot model deletion (or removal) of a existing commitment. If you want model deletion of an existing commitment, see the negate BillScenarioCommitmentModificationAction of BatchCreateBillScenarioCommitmentModification operation.
The BatchDeleteBillScenarioCommitmentModification
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission bcm-pricing-calculator:DeleteBillScenarioCommitmentModification
in your policies.
Delete usage that you have created in a Bill Scenario. You can only delete usage that you had added and cannot model deletion (or removal) of a existing usage. If you want model removal of an existing usage, see BatchUpdateBillScenarioUsageModification.
", + "documentation":"Delete usage that you have created in a Bill Scenario. You can only delete usage that you had added and cannot model deletion (or removal) of a existing usage. If you want model removal of an existing usage, see BatchUpdateBillScenarioUsageModification.
The BatchDeleteBillScenarioUsageModification
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission bcm-pricing-calculator:DeleteBillScenarioUsageModification
in your policies.
Delete usage that you have created in a Workload estimate. You can only delete usage that you had added and cannot model deletion (or removal) of a existing usage. If you want model removal of an existing usage, see BatchUpdateWorkloadEstimateUsage.
", + "documentation":"Delete usage that you have created in a Workload estimate. You can only delete usage that you had added and cannot model deletion (or removal) of a existing usage. If you want model removal of an existing usage, see BatchUpdateWorkloadEstimateUsage.
The BatchDeleteWorkloadEstimateUsage
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission bcm-pricing-calculator:DeleteWorkloadEstimateUsage
in your policies.
Update a newly added or existing commitment. You can update the commitment group based on a commitment ID and a Bill scenario ID.
", + "documentation":"Update a newly added or existing commitment. You can update the commitment group based on a commitment ID and a Bill scenario ID.
The BatchUpdateBillScenarioCommitmentModification
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission bcm-pricing-calculator:UpdateBillScenarioCommitmentModification
in your policies.
Update a newly added or existing usage lines. You can update the usage amounts, usage hour, and usage group based on a usage ID and a Bill scenario ID.
", + "documentation":"Update a newly added or existing usage lines. You can update the usage amounts, usage hour, and usage group based on a usage ID and a Bill scenario ID.
The BatchUpdateBillScenarioUsageModification
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission bcm-pricing-calculator:UpdateBillScenarioUsageModification
in your policies.
Update a newly added or existing usage lines. You can update the usage amounts and usage group based on a usage ID and a Workload estimate ID.
", + "documentation":"Update a newly added or existing usage lines. You can update the usage amounts and usage group based on a usage ID and a Workload estimate ID.
The BatchUpdateWorkloadEstimateUsage
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission bcm-pricing-calculator:UpdateWorkloadEstimateUsage
in your policies.
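Since these Batch* operations have no IAM permissions of their own, a caller needs the corresponding non-batch actions named in the descriptions above. A sketch of an inline policy granting them, attached with IAM's put_role_policy; the role and policy names are hypothetical.

import json
import boto3

iam = boto3.client("iam")

# Actions taken directly from the operation descriptions above.
policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Action": [
            "bcm-pricing-calculator:CreateBillScenarioCommitmentModification",
            "bcm-pricing-calculator:CreateBillScenarioUsageModification",
            "bcm-pricing-calculator:CreateWorkloadEstimateUsage",
            "bcm-pricing-calculator:DeleteBillScenarioCommitmentModification",
            "bcm-pricing-calculator:DeleteBillScenarioUsageModification",
            "bcm-pricing-calculator:DeleteWorkloadEstimateUsage",
            "bcm-pricing-calculator:UpdateBillScenarioCommitmentModification",
            "bcm-pricing-calculator:UpdateBillScenarioUsageModification",
            "bcm-pricing-calculator:UpdateWorkloadEstimateUsage",
        ],
        "Resource": "*",
    }],
}

iam.put_role_policy(
    RoleName="pricing-calculator-role",        # hypothetical role
    PolicyName="bcm-pricing-calculator-batch",
    PolicyDocument=json.dumps(policy),
)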
Filter bill estimates based on their creation date.
" + "documentation":"Filter bill estimates based on the creation date.
" }, "expiresAtFilter":{ "shape":"FilterTimestamp", - "documentation":"Filter bill estimates based on their expiration date.
" + "documentation":"Filter bill estimates based on the expiration date.
" }, "nextToken":{ "shape":"NextPageToken", @@ -3061,11 +3066,11 @@ }, "createdAtFilter":{ "shape":"FilterTimestamp", - "documentation":"Filter bill scenarios based on their creation date.
" + "documentation":"Filter bill scenarios based on the creation date.
" }, "expiresAtFilter":{ "shape":"FilterTimestamp", - "documentation":"Filter bill scenarios based on their expiration date.
" + "documentation":"Filter bill scenarios based on the expiration date.
" }, "nextToken":{ "shape":"NextPageToken", @@ -3232,11 +3237,11 @@ "members":{ "createdAtFilter":{ "shape":"FilterTimestamp", - "documentation":"Filter workload estimates based on their creation date.
" + "documentation":"Filter workload estimates based on the creation date.
" }, "expiresAtFilter":{ "shape":"FilterTimestamp", - "documentation":"Filter workload estimates based on their expiration date.
" + "documentation":"Filter workload estimates based on the expiration date.
" }, "filters":{ "shape":"ListWorkloadEstimatesFilters", diff --git a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json index 36c06b21b6..3e00d62b0c 100644 --- a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json +++ b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json @@ -102,7 +102,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent
.
Sends a prompt for the agent to process and respond to. Note the following fields for the request:
To continue the same conversation with an agent, use the same sessionId
value in the request.
To activate trace enablement, turn enableTrace
to true
. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.
To stream agent responses, make sure that only orchestration prompt is enabled. Agent streaming is not supported for the following steps:
Pre-processing
Post-processing
Agent with 1 Knowledge base and User Input
not enabled
End a conversation by setting endSession
to true
.
In the sessionState
object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.
The response is returned in the bytes
field of the chunk
object.
The attribution
object contains citations for parts of the response.
If you set enableTrace
to true
in the request, you can trace the agent's steps and reasoning process that led it to the response.
If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl
field.
Errors are also surfaced in the response.
Sends a prompt for the agent to process and respond to. Note the following fields for the request:
To continue the same conversation with an agent, use the same sessionId
value in the request.
To activate trace enablement, turn enableTrace
to true
. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.
To stream agent responses, make sure that only orchestration prompt is enabled. Agent streaming is not supported for the following steps:
Pre-processing
Post-processing
Agent with 1 Knowledge base and User Input
not enabled
End a conversation by setting endSession
to true
.
In the sessionState
object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.
The response contains both chunk and trace attributes.
The final response is returned in the bytes
field of the chunk
object. The InvokeAgent
returns one chunk for the entire interaction.
The attribution
object contains citations for parts of the response.
If you set enableTrace
to true
in the request, you can trace the agent's steps and reasoning process that led it to the response.
If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl
field.
Errors are also surfaced in the response.
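A minimal boto3 sketch of invoking an agent and reading the chunk and trace events from the completion stream; the agent, alias, and session identifiers are placeholders.

import boto3

agent_runtime = boto3.client("bedrock-agent-runtime")

response = agent_runtime.invoke_agent(
    agentId="AGENT123456",                     # placeholder
    agentAliasId="ALIAS123456",                # placeholder
    sessionId="my-session-1",                  # reuse the same value to continue the conversation
    inputText="What is the status of order 42?",
    enableTrace=True,                          # surfaces the agent's reasoning trace
)

completion = ""
for event in response["completion"]:           # event stream of chunk/trace/returnControl events
    if "chunk" in event:
        completion += event["chunk"]["bytes"].decode("utf-8")
    elif "trace" in event:
        pass                                   # inspect event["trace"] for the reasoning steps
print(completion)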
Invokes an inline Amazon Bedrock agent using the configurations you provide with the request.
Specify the following fields for security purposes.
(Optional) customerEncryptionKeyArn
– The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent.
(Optional) idleSessionTTLinSeconds
– Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeInlineAgent
request begins a new session.
To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration
object. For more information, see Advanced prompts.
The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled.
The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeInlineAgent
.
Invokes an inline Amazon Bedrock agent using the configurations you provide with the request.
Specify the following fields for security purposes.
(Optional) customerEncryptionKeyArn
– The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent.
(Optional) idleSessionTTLinSeconds
– Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeInlineAgent
request begins a new session.
To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration
object. For more information, see Advanced prompts.
The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled.
Queries a knowledge base and generates responses based on the retrieved results, with output in streaming format.
The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeModelWithResponseStream
.
Queries a knowledge base and generates responses based on the retrieved results, with output in streaming format.
The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeModelWithResponseStream
.
This operation requires permission for the bedrock:RetrieveAndGenerate
action.
The citation.
" + "documentation":"The citation.
", + "deprecated":true, + "deprecatedMessage":"Citation is deprecated. Please use GeneratedResponsePart and RetrievedReferences for citation event." + }, + "generatedResponsePart":{ + "shape":"GeneratedResponsePart", + "documentation":"The generated response to the citation event.
" + }, + "retrievedReferences":{ + "shape":"RetrievedReferences", + "documentation":"The retrieved references of the citation event.
" } }, "documentation":"A citation event.
", @@ -1314,7 +1324,16 @@ }, "FlowCompletionReason":{ "type":"string", - "enum":["SUCCESS"] + "enum":[ + "SUCCESS", + "INPUT_REQUIRED" + ] + }, + "FlowExecutionId":{ + "type":"string", + "max":100, + "min":2, + "pattern":"^[0-9a-zA-Z._:-]+$" }, "FlowIdentifier":{ "type":"string", @@ -1326,14 +1345,17 @@ "type":"structure", "required":[ "content", - "nodeName", - "nodeOutputName" + "nodeName" ], "members":{ "content":{ "shape":"FlowInputContent", "documentation":"Contains information about an input into the prompt flow.
" }, + "nodeInputName":{ + "shape":"NodeInputName", + "documentation":"The name of the input from the flow input node.
" + }, "nodeName":{ "shape":"NodeName", "documentation":"The name of the flow input node that begins the prompt flow.
" @@ -1363,6 +1385,42 @@ "max":1, "min":1 }, + "FlowMultiTurnInputContent":{ + "type":"structure", + "members":{ + "document":{ + "shape":"Document", + "documentation":"The requested additional input to send back to the multi-turn flow node.
" + } + }, + "documentation":"The content structure containing input information for multi-turn flow interactions.
", + "union":true + }, + "FlowMultiTurnInputRequestEvent":{ + "type":"structure", + "required":[ + "content", + "nodeName", + "nodeType" + ], + "members":{ + "content":{ + "shape":"FlowMultiTurnInputContent", + "documentation":"The content payload containing the input request details for the multi-turn interaction.
" + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"The name of the node in the flow that is requesting the input.
" + }, + "nodeType":{ + "shape":"NodeType", + "documentation":"The type of the node in the flow that is requesting the input.
" + } + }, + "documentation":"Response object from the flow multi-turn node requesting additional information.
", + "event":true, + "sensitive":true + }, "FlowOutputContent":{ "type":"structure", "members":{ @@ -1422,6 +1480,10 @@ "shape":"FlowCompletionEvent", "documentation":"Contains information about why the flow completed.
" }, + "flowMultiTurnInputRequestEvent":{ + "shape":"FlowMultiTurnInputRequestEvent", + "documentation":"The event stream containing the multi-turn input request information from the flow.
" + }, "flowOutputEvent":{ "shape":"FlowOutputEvent", "documentation":"Contains information about an output from flow invocation.
" @@ -2614,7 +2676,11 @@ "InternalServerException":{ "type":"structure", "members":{ - "message":{"shape":"NonBlankString"} + "message":{"shape":"NonBlankString"}, + "reason":{ + "shape":"String", + "documentation":"The reason for the exception. If the reason is BEDROCK_MODEL_INVOCATION_SERVICE_UNAVAILABLE
, the model invocation service is unavailable. Retry your request.
An internal server error occurred. Retry your request.
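A sketch of retrying when the new reason member signals that the model invocation service was unavailable; it assumes botocore exposes the modeled reason on the parsed error response, and the backoff pattern is illustrative rather than prescribed.

import time
import boto3
from botocore.exceptions import ClientError

agent_runtime = boto3.client("bedrock-agent-runtime")

def invoke_with_retry(params, attempts=3):
    for attempt in range(attempts):
        try:
            return agent_runtime.invoke_agent(**params)
        except ClientError as err:
            code = err.response["Error"]["Code"]
            reason = err.response.get("reason")      # new modeled member; may be absent
            if code == "InternalServerException" and reason == "BEDROCK_MODEL_INVOCATION_SERVICE_UNAVAILABLE":
                time.sleep(2 ** attempt)             # simple exponential backoff before retrying
                continue
            raise
    raise RuntimeError("exhausted retries")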
", "error":{"httpStatusCode":500}, @@ -2805,6 +2871,10 @@ "shape":"Boolean", "documentation":"Specifies whether to return the trace for the flow or not. Traces track inputs and outputs for nodes in the flow. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.
" }, + "executionId":{ + "shape":"FlowExecutionId", + "documentation":"The unique identifier for the current flow execution. If you don't provide a value, Amazon Bedrock creates the identifier for you.
" + }, "flowAliasIdentifier":{ "shape":"FlowAliasIdentifier", "documentation":"The unique identifier of the flow alias.
", @@ -2831,6 +2901,12 @@ "type":"structure", "required":["responseStream"], "members":{ + "executionId":{ + "shape":"FlowExecutionId", + "documentation":"The unique identifier for the current flow execution.
", + "location":"header", + "locationName":"x-amz-bedrock-flow-execution-id" + }, "responseStream":{ "shape":"FlowResponseStream", "documentation":"The output of the flow, returned as a stream. If there's an error, the error is returned.
" @@ -2903,6 +2979,10 @@ "documentation":"The unique identifier of the session. Use the same value across requests to continue the same conversation.
", "location":"uri", "locationName":"sessionId" + }, + "streamingConfigurations":{ + "shape":"StreamingConfigurations", + "documentation":"Specifies the configurations for streaming.
To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream
action.
The status of the alias of the agent and whether it is ready for use. The following statuses are possible:
CREATING – The agent alias is being created.
PREPARED – The agent alias is finished being created or updated and is ready to be invoked.
FAILED – The agent alias API operation failed.
UPDATING – The agent alias is being updated.
DELETING – The agent alias is being deleted.
The status of the alias of the agent and whether it is ready for use. The following statuses are possible:
CREATING – The agent alias is being created.
PREPARED – The agent alias is finished being created or updated and is ready to be invoked.
FAILED – The agent alias API operation failed.
UPDATING – The agent alias is being updated.
DELETING – The agent alias is being deleted.
DISSOCIATED - The agent alias has no version associated with it.
Indicates that the CachePointBlock is of the default type
" + } + }, + "documentation":"Indicates where a cache checkpoint is located. All information before this checkpoint is cached to be accessed on subsequent requests.
" + }, + "CachePointType":{ + "type":"string", + "enum":["default"] + }, "ChatPromptTemplateConfiguration":{ "type":"structure", "required":["messages"], @@ -2678,6 +2694,10 @@ "ContentBlock":{ "type":"structure", "members":{ + "cachePoint":{ + "shape":"CachePointBlock", + "documentation":"Creates a cache checkpoint within a message.
" + }, "text":{ "shape":"String", "documentation":"The text in the message.
" @@ -4910,7 +4930,7 @@ "FlowNodes":{ "type":"list", "member":{"shape":"FlowNode"}, - "max":20, + "max":40, "min":0 }, "FlowStatus":{ @@ -5092,6 +5112,14 @@ "shape":"UnknownConnectionTargetInputFlowValidationDetails", "documentation":"Details about an unknown target input for a connection.
" }, + "unknownNodeInput":{ + "shape":"UnknownNodeInputFlowValidationDetails", + "documentation":"Details about an unknown input for a node.
" + }, + "unknownNodeOutput":{ + "shape":"UnknownNodeOutputFlowValidationDetails", + "documentation":"Details about an unknown output for a node.
" + }, "unreachableNode":{ "shape":"UnreachableNodeFlowValidationDetails", "documentation":"Details about an unreachable node in the flow.
" @@ -5142,7 +5170,9 @@ "MultipleNodeInputConnections", "UnfulfilledNodeInput", "UnsatisfiedConnectionConditions", - "Unspecified" + "Unspecified", + "UnknownNodeInput", + "UnknownNodeOutput" ] }, "FlowValidations":{ @@ -7227,7 +7257,7 @@ "MaximumLength":{ "type":"integer", "box":true, - "max":4096, + "max":8192, "min":0 }, "MemoryConfiguration":{ @@ -8131,7 +8161,7 @@ "PromptInputVariablesList":{ "type":"list", "member":{"shape":"PromptInputVariable"}, - "max":5, + "max":10, "min":0, "sensitive":true }, @@ -9451,6 +9481,10 @@ "SystemContentBlock":{ "type":"structure", "members":{ + "cachePoint":{ + "shape":"CachePointBlock", + "documentation":"Creates a cache checkpoint within a tool designation
" + }, "text":{ "shape":"NonEmptyString", "documentation":"The text in the system prompt.
" @@ -9544,6 +9578,10 @@ "type":"structure", "required":["text"], "members":{ + "cachePoint":{ + "shape":"CachePointBlock", + "documentation":"A cache checkpoint within a template configuration.
" + }, "inputVariables":{ "shape":"PromptInputVariablesList", "documentation":"An array of the variables in the prompt template.
" @@ -9571,6 +9609,10 @@ "Tool":{ "type":"structure", "members":{ + "cachePoint":{ + "shape":"CachePointBlock", + "documentation":"Creates a cache checkpoint within a tool designation
" + }, "toolSpec":{ "shape":"ToolSpecification", "documentation":"The specification for the tool.
" @@ -9800,6 +9842,42 @@ }, "documentation":"Details about an unknown target input for a connection.
" }, + "UnknownNodeInputFlowValidationDetails":{ + "type":"structure", + "required":[ + "input", + "node" + ], + "members":{ + "input":{ + "shape":"FlowNodeInputName", + "documentation":"The name of the node with the unknown input.
" + }, + "node":{ + "shape":"FlowNodeName", + "documentation":"The name of the unknown input.
" + } + }, + "documentation":"Details about an unknown input for a node.
" + }, + "UnknownNodeOutputFlowValidationDetails":{ + "type":"structure", + "required":[ + "node", + "output" + ], + "members":{ + "node":{ + "shape":"FlowNodeName", + "documentation":"The name of the node with the unknown output.
" + }, + "output":{ + "shape":"FlowNodeOutputName", + "documentation":"The name of the unknown output.
" + } + }, + "documentation":"Details about an unknown output for a node.
" + }, "UnreachableNodeFlowValidationDetails":{ "type":"structure", "required":["node"], diff --git a/botocore/data/bedrock-runtime/2023-09-30/service-2.json b/botocore/data/bedrock-runtime/2023-09-30/service-2.json index 677a523eb4..dc5ad400a4 100644 --- a/botocore/data/bedrock-runtime/2023-09-30/service-2.json +++ b/botocore/data/bedrock-runtime/2023-09-30/service-2.json @@ -2744,7 +2744,7 @@ "type":"string", "max":64, "min":1, - "pattern":"[a-zA-Z][a-zA-Z0-9_]*" + "pattern":"[a-zA-Z0-9_-]+" }, "ToolResultBlock":{ "type":"structure", diff --git a/botocore/data/bedrock/2023-04-20/service-2.json b/botocore/data/bedrock/2023-04-20/service-2.json index 11b217be24..7ce80cb04f 100644 --- a/botocore/data/bedrock/2023-04-20/service-2.json +++ b/botocore/data/bedrock/2023-04-20/service-2.json @@ -2163,6 +2163,10 @@ "inferenceParams":{ "shape":"EvaluationModelInferenceParams", "documentation":"Each Amazon Bedrock support different inference parameters that change how the model behaves during inference.
" + }, + "performanceConfig":{ + "shape":"PerformanceConfiguration", + "documentation":"Specifies performance settings for the model or inference profile.
" } }, "documentation":"Contains the ARN of the Amazon Bedrock model or inference profile specified in your evaluation job. Each Amazon Bedrock model supports different inferenceParams
. To learn more about supported inference parameters for Amazon Bedrock models, see Inference parameters for foundation models.
The inferenceParams
are specified using JSON. To successfully insert JSON as a string, make sure that all quotations are properly escaped. For example, \"temperature\":\"0.25\"
key value pair would need to be formatted as \\\"temperature\\\":\\\"0.25\\\"
to be successfully accepted in the request.
Specifies whether to use the latency-optimized or standard version of a model or inference profile.
" + } + }, + "documentation":"Contains performance settings for a model.
" + }, "PositiveInteger":{ "type":"integer", "box":true, diff --git a/botocore/data/cloudtrail/2013-11-01/service-2.json b/botocore/data/cloudtrail/2013-11-01/service-2.json index 524d093523..8f59fe1b2f 100644 --- a/botocore/data/cloudtrail/2013-11-01/service-2.json +++ b/botocore/data/cloudtrail/2013-11-01/service-2.json @@ -533,7 +533,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"ThrottlingException"} ], - "documentation":"Describes the settings for the Insights event selectors that you configured for your trail or event data store. GetInsightSelectors
shows if CloudTrail Insights event logging is enabled on the trail or event data store, and if it is, which Insights types are enabled. If you run GetInsightSelectors
on a trail or event data store that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException
Specify either the EventDataStore
parameter to get Insights event selectors for an event data store, or the TrailName
parameter to the get Insights event selectors for a trail. You cannot specify these parameters together.
For more information, see Logging CloudTrail Insights events in the CloudTrail User Guide.
", + "documentation":"Describes the settings for the Insights event selectors that you configured for your trail or event data store. GetInsightSelectors
shows if CloudTrail Insights event logging is enabled on the trail or event data store, and if it is, which Insights types are enabled. If you run GetInsightSelectors
on a trail or event data store that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException
Specify either the EventDataStore
parameter to get Insights event selectors for an event data store, or the TrailName
parameter to the get Insights event selectors for a trail. You cannot specify these parameters together.
For more information, see Working with CloudTrail Insights in the CloudTrail User Guide.
", "idempotent":true }, "GetQueryResults":{ @@ -836,7 +836,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"InsufficientDependencyServiceAccessPermissionException"} ], - "documentation":"Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use either AdvancedEventSelectors
or EventSelectors
, but not both. If you apply AdvancedEventSelectors
to a trail, any existing EventSelectors
are overwritten.
You can use AdvancedEventSelectors
to log management events, data events for all resource types, and network activity events.
You can use EventSelectors
to log management events and data events for the following resource types:
AWS::DynamoDB::Table
AWS::Lambda::Function
AWS::S3::Object
You can't use EventSelectors
to log network activity events.
If you want your trail to log Insights events, be sure the event selector or advanced event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events or network activity events.
When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.
Example
You create an event selector for a trail and specify that you want to log write-only events.
The EC2 GetConsoleOutput
and RunInstances
API operations occur in your account.
CloudTrail evaluates whether the events match your event selectors.
The RunInstances
is a write-only event and it matches your event selector. The trail logs the event.
The GetConsoleOutput
is a read-only event that doesn't match your event selector. The trail doesn't log the event.
The PutEventSelectors
operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException
exception is thrown.
You can configure up to five event selectors for each trail.
You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see Logging management events, Logging data events, Logging network activity events, and Quotas in CloudTrail in the CloudTrail User Guide.
", + "documentation":"Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use either AdvancedEventSelectors
or EventSelectors
, but not both. If you apply AdvancedEventSelectors
to a trail, any existing EventSelectors
are overwritten.
You can use AdvancedEventSelectors
to log management events, data events for all resource types, and network activity events.
You can use EventSelectors
to log management events and data events for the following resource types:
AWS::DynamoDB::Table
AWS::Lambda::Function
AWS::S3::Object
You can't use EventSelectors
to log network activity events.
If you want your trail to log Insights events, be sure the event selector or advanced event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Working with CloudTrail Insights in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events or network activity events.
When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.
Example
You create an event selector for a trail and specify that you want to log write-only events.
The EC2 GetConsoleOutput
and RunInstances
API operations occur in your account.
CloudTrail evaluates whether the events match your event selectors.
The RunInstances
is a write-only event and it matches your event selector. The trail logs the event.
The GetConsoleOutput
is a read-only event that doesn't match your event selector. The trail doesn't log the event.
The PutEventSelectors
operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException
exception is thrown.
You can configure up to five event selectors for each trail.
You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see Logging management events, Logging data events, Logging network activity events, and Quotas in CloudTrail in the CloudTrail User Guide.
", "idempotent":true }, "PutInsightSelectors":{ @@ -865,7 +865,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"ThrottlingException"} ], - "documentation":"Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail or event data store. You also use PutInsightSelectors
to turn off Insights event logging, by passing an empty list of Insights types. The valid Insights event types are ApiErrorRateInsight
and ApiCallRateInsight
.
To enable Insights on an event data store, you must specify the ARNs (or ID suffix of the ARNs) for the source event data store (EventDataStore
) and the destination event data store (InsightsDestination
). The source event data store logs management events and enables Insights. The destination event data store logs Insights events based upon the management event activity of the source event data store. The source and destination event data stores must belong to the same Amazon Web Services account.
To log Insights events for a trail, you must specify the name (TrailName
) of the CloudTrail trail for which you want to change or add Insights selectors.
To log CloudTrail Insights events on API call volume, the trail or event data store must log write
management events. To log CloudTrail Insights events on API error rate, the trail or event data store must log read
or write
management events. You can call GetEventSelectors
on a trail to check whether the trail logs management events. You can call GetEventDataStore
on an event data store to check whether the event data store logs management events.
For more information, see Logging CloudTrail Insights events in the CloudTrail User Guide.
", + "documentation":"Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail or event data store. You also use PutInsightSelectors
to turn off Insights event logging, by passing an empty list of Insights types. The valid Insights event types are ApiErrorRateInsight
and ApiCallRateInsight
.
To enable Insights on an event data store, you must specify the ARNs (or ID suffix of the ARNs) for the source event data store (EventDataStore
) and the destination event data store (InsightsDestination
). The source event data store logs management events and enables Insights. The destination event data store logs Insights events based upon the management event activity of the source event data store. The source and destination event data stores must belong to the same Amazon Web Services account.
To log Insights events for a trail, you must specify the name (TrailName
) of the CloudTrail trail for which you want to change or add Insights selectors.
To log CloudTrail Insights events on API call volume, the trail or event data store must log write
management events. To log CloudTrail Insights events on API error rate, the trail or event data store must log read
or write
management events. You can call GetEventSelectors
on a trail to check whether the trail logs management events. You can call GetEventDataStore
on an event data store to check whether the event data store logs management events.
For more information, see Working with CloudTrail Insights in the CloudTrail User Guide.
", "idempotent":true }, "PutResourcePolicy":{ @@ -967,6 +967,22 @@ ], "documentation":"Restores a deleted event data store specified by EventDataStore
, which accepts an event data store ARN. You can only restore a deleted event data store within the seven-day wait period after deletion. Restoring an event data store can take several minutes, depending on the size of the event data store.
Searches sample queries and returns a list of sample queries that are sorted by relevance. To search for sample queries, provide a natural language SearchPhrase
in English.
Contains all selector statements in an advanced event selector.
" } }, - "documentation":"Advanced event selectors let you create fine-grained selectors for CloudTrail management, data, and network activity events. They help you control costs by logging only those events that are important to you. For more information about configuring advanced event selectors, see the Logging data events, Logging network activity events, and Logging management events topics in the CloudTrail User Guide.
You cannot apply both event selectors and advanced event selectors to a trail.
For information about configurable advanced event selector fields, see AdvancedEventSelector in the CloudTrailUser Guide.
" + "documentation":"Advanced event selectors let you create fine-grained selectors for CloudTrail management, data, and network activity events. They help you control costs by logging only those events that are important to you. For more information about configuring advanced event selectors, see the Logging data events, Logging network activity events, and Logging management events topics in the CloudTrail User Guide.
You cannot apply both event selectors and advanced event selectors to a trail.
For information about configurable advanced event selector fields, see AdvancedEventSelector in the CloudTrail API Reference.
" }, "AdvancedEventSelectors":{ "type":"list", @@ -1361,7 +1377,7 @@ "members":{ "Field":{ "shape":"SelectorField", - "documentation":"A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.
For more information, see AdvancedFieldSelector in the CloudTrailUser Guide.
" + "documentation":"A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.
For more information, see AdvancedFieldSelector in the CloudTrail API Reference.
Selectors don't support the use of wildcards like *
. To match multiple values with a single condition, you may use StartsWith
, EndsWith
, NotStartsWith
, or NotEndsWith
to explicitly match the beginning or end of the event field.
The settings for the source S3 bucket.
" }, + "SampleQueryDescription":{"type":"string"}, + "SampleQueryName":{"type":"string"}, + "SampleQueryRelevance":{"type":"float"}, + "SampleQuerySQL":{"type":"string"}, + "SearchSampleQueriesMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "SearchSampleQueriesRequest":{ + "type":"structure", + "required":["SearchPhrase"], + "members":{ + "SearchPhrase":{ + "shape":"SearchSampleQueriesSearchPhrase", + "documentation":"The natural language phrase to use for the semantic search. The phrase must be in English. The length constraint is in characters, not words.
" + }, + "MaxResults":{ + "shape":"SearchSampleQueriesMaxResults", + "documentation":"The maximum number of results to return on a single page. The default value is 10.
" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"A token you can use to get the next page of results. The length constraint is in characters, not words.
" + } + } + }, + "SearchSampleQueriesResponse":{ + "type":"structure", + "members":{ + "SearchResults":{ + "shape":"SearchSampleQueriesSearchResults", + "documentation":"A list of objects containing the search results ordered from most relevant to least relevant.
" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"A token you can use to get the next page of results.
" + } + } + }, + "SearchSampleQueriesSearchPhrase":{ + "type":"string", + "max":1000, + "min":2, + "pattern":"^[ -~\\n]*$" + }, + "SearchSampleQueriesSearchResult":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"SampleQueryName", + "documentation":"The name of a sample query.
" + }, + "Description":{ + "shape":"SampleQueryDescription", + "documentation":"A longer description of a sample query.
" + }, + "SQL":{ + "shape":"SampleQuerySQL", + "documentation":"The SQL code of the sample query.
" + }, + "Relevance":{ + "shape":"SampleQueryRelevance", + "documentation":"A value between 0 and 1 indicating the similarity between the search phrase and result.
" + } + }, + "documentation":" A search result returned by the SearchSampleQueries
operation.
An array of strings that specify the compute types that are allowed for the batch build. See Build environment compute types in the CodeBuild User Guide for these values.
" + }, + "fleetsAllowed":{ + "shape":"FleetsAllowed", + "documentation":"An array of strings that specify the fleets that are allowed for the batch build. See Run builds on reserved capacity fleets in the CodeBuild User Guide for more information.
" } }, "documentation":"Specifies restrictions for the batch build.
" @@ -2466,6 +2470,10 @@ "max":100, "min":1 }, + "FleetsAllowed":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, "GetReportGroupTrendInput":{ "type":"structure", "required":[ @@ -3549,7 +3557,7 @@ }, "reportBuildStatus":{ "shape":"WrapperBoolean", - "documentation":" Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
If your project's builds are triggered by a webhook, you must push a new commit to the repo for a change to this property to take effect.
" + "documentation":" Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
If your project's builds are triggered by a webhook, you must push a new commit to the repo for a change to this property to take effect.
" }, "buildStatusConfig":{ "shape":"BuildStatusConfig", @@ -4412,7 +4420,7 @@ }, "reportBuildStatusOverride":{ "shape":"WrapperBoolean", - "documentation":" Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
The type of webhook filter. There are nine webhook filter types: EVENT
, ACTOR_ACCOUNT_ID
, HEAD_REF
, BASE_REF
, FILE_PATH
, COMMIT_MESSAGE
, TAG_NAME
, RELEASE_NAME
, and WORKFLOW_NAME
.
EVENT
A webhook event triggers a build when the provided pattern
matches one of nine event types: PUSH
, PULL_REQUEST_CREATED
, PULL_REQUEST_UPDATED
, PULL_REQUEST_CLOSED
, PULL_REQUEST_REOPENED
, PULL_REQUEST_MERGED
, RELEASED
, PRERELEASED
, and WORKFLOW_JOB_QUEUED
. The EVENT
patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED
filters all push, pull request created, and pull request updated events.
Types PULL_REQUEST_REOPENED
and WORKFLOW_JOB_QUEUED
work with GitHub and GitHub Enterprise only. Types RELEASED
and PRERELEASED
work with GitHub only.
ACTOR_ACCOUNT_ID
A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern
.
HEAD_REF
A webhook event triggers a build when the head reference matches the regular expression pattern
. For example, refs/heads/branch-name
and refs/tags/tag-name
.
Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.
BASE_REF
A webhook event triggers a build when the base reference matches the regular expression pattern
. For example, refs/heads/branch-name
.
Works with pull request events only.
FILE_PATH
A webhook triggers a build when the path of a changed file matches the regular expression pattern
.
Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.
COMMIT_MESSAGE
A webhook triggers a build when the head commit message matches the regular expression pattern
.
Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.
TAG_NAME
A webhook triggers a build when the tag name of the release matches the regular expression pattern
.
Works with RELEASED
and PRERELEASED
events only.
RELEASE_NAME
A webhook triggers a build when the release name matches the regular expression pattern
.
Works with RELEASED
and PRERELEASED
events only.
REPOSITORY_NAME
A webhook triggers a build when the repository name matches the regular expression pattern.
Works with GitHub global or organization webhooks only.
WORKFLOW_NAME
A webhook triggers a build when the workflow name matches the regular expression pattern
.
Works with WORKFLOW_JOB_QUEUED
events only.
The type of webhook filter. There are nine webhook filter types: EVENT
, ACTOR_ACCOUNT_ID
, HEAD_REF
, BASE_REF
, FILE_PATH
, COMMIT_MESSAGE
, TAG_NAME
, RELEASE_NAME
, and WORKFLOW_NAME
.
EVENT
A webhook event triggers a build when the provided pattern
matches one of nine event types: PUSH
, PULL_REQUEST_CREATED
, PULL_REQUEST_UPDATED
, PULL_REQUEST_CLOSED
, PULL_REQUEST_REOPENED
, PULL_REQUEST_MERGED
, RELEASED
, PRERELEASED
, and WORKFLOW_JOB_QUEUED
. The EVENT
patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED
filters all push, pull request created, and pull request updated events.
Types PULL_REQUEST_REOPENED
and WORKFLOW_JOB_QUEUED
work with GitHub and GitHub Enterprise only. Types RELEASED
and PRERELEASED
work with GitHub only.
ACTOR_ACCOUNT_ID
A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern
.
HEAD_REF
A webhook event triggers a build when the head reference matches the regular expression pattern
. For example, refs/heads/branch-name
and refs/tags/tag-name
.
Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.
BASE_REF
A webhook event triggers a build when the base reference matches the regular expression pattern
. For example, refs/heads/branch-name
.
Works with pull request events only.
FILE_PATH
A webhook triggers a build when the path of a changed file matches the regular expression pattern
.
Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.
COMMIT_MESSAGE
A webhook triggers a build when the head commit message matches the regular expression pattern
.
Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.
TAG_NAME
A webhook triggers a build when the tag name of the release matches the regular expression pattern
.
Works with RELEASED
and PRERELEASED
events only.
RELEASE_NAME
A webhook triggers a build when the release name matches the regular expression pattern
.
Works with RELEASED
and PRERELEASED
events only.
REPOSITORY_NAME
A webhook triggers a build when the repository name matches the regular expression pattern.
Works with GitHub global or organization webhooks only.
WORKFLOW_NAME
A webhook triggers a build when the workflow name matches the regular expression pattern
.
Works with WORKFLOW_JOB_QUEUED
events only.
For CodeBuild-hosted Buildkite runner builds, WORKFLOW_NAME filters will filter by pipeline name.
Used to indicate that the pattern
determines which webhook events do not trigger a build. If true, then a webhook event that does not match the pattern
triggers a build. If false, then a webhook event that matches the pattern
triggers a build.
A filter used to determine which webhooks trigger a build.
" + "documentation":"A filter used to determine which webhooks trigger a build.
" }, "WebhookFilterType":{ "type":"string", diff --git a/botocore/data/cognito-identity/2014-06-30/endpoint-rule-set-1.json b/botocore/data/cognito-identity/2014-06-30/endpoint-rule-set-1.json index 348fc5c4bb..96572525e5 100644 --- a/botocore/data/cognito-identity/2014-06-30/endpoint-rule-set-1.json +++ b/botocore/data/cognito-identity/2014-06-30/endpoint-rule-set-1.json @@ -166,6 +166,82 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-identity-fips.us-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-identity-fips.us-east-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-identity-fips.us-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-identity-fips.us-west-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -270,6 +346,31 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://cognito-identity.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { diff --git a/botocore/data/cognito-idp/2016-04-18/endpoint-rule-set-1.json b/botocore/data/cognito-idp/2016-04-18/endpoint-rule-set-1.json index 0b7d7c8290..5e76d91815 100644 --- a/botocore/data/cognito-idp/2016-04-18/endpoint-rule-set-1.json +++ b/botocore/data/cognito-idp/2016-04-18/endpoint-rule-set-1.json @@ -166,6 +166,82 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-east-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-west-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -270,6 +346,31 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": 
"https://cognito-idp.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { diff --git a/botocore/data/compute-optimizer/2019-11-01/service-2.json b/botocore/data/compute-optimizer/2019-11-01/service-2.json index 11bdef413d..39ec013fc2 100644 --- a/botocore/data/compute-optimizer/2019-11-01/service-2.json +++ b/botocore/data/compute-optimizer/2019-11-01/service-2.json @@ -611,6 +611,20 @@ "member":{"shape":"AccountId"} }, "AllocatedStorage":{"type":"integer"}, + "AllocationStrategy":{ + "type":"string", + "enum":[ + "Prioritized", + "LowestPrice" + ] + }, + "AsgType":{ + "type":"string", + "enum":[ + "SingleInstanceType", + "MixedInstanceTypes" + ] + }, "AutoScalingConfiguration":{ "type":"string", "enum":[ @@ -628,22 +642,38 @@ "members":{ "desiredCapacity":{ "shape":"DesiredCapacity", - "documentation":"The desired capacity, or number of instances, for the Auto Scaling group.
" + "documentation":"The desired capacity, or number of instances, for the EC2 Auto Scaling group.
" }, "minSize":{ "shape":"MinSize", - "documentation":"The minimum size, or minimum number of instances, for the Auto Scaling group.
" + "documentation":"The minimum size, or minimum number of instances, for the EC2 Auto Scaling group.
" }, "maxSize":{ "shape":"MaxSize", - "documentation":"The maximum size, or maximum number of instances, for the Auto Scaling group.
" + "documentation":"The maximum size, or maximum number of instances, for the EC2 Auto Scaling group.
" }, "instanceType":{ - "shape":"InstanceType", - "documentation":"The instance type for the Auto Scaling group.
" + "shape":"NullableInstanceType", + "documentation":"The instance type for the EC2 Auto Scaling group.
" + }, + "allocationStrategy":{ + "shape":"AllocationStrategy", + "documentation":"Describes the allocation strategy that the EC2 Auto Scaling group uses. This field is only available for EC2 Auto Scaling groups with mixed instance types.
" + }, + "estimatedInstanceHourReductionPercentage":{ + "shape":"NullableEstimatedInstanceHourReductionPercentage", + "documentation":"Describes the projected percentage reduction in instance hours after adopting the recommended configuration. This field is only available for EC2 Auto Scaling groups with scaling policies.
" + }, + "type":{ + "shape":"AsgType", + "documentation":"Describes whether the EC2 Auto Scaling group has a single instance type or a mixed instance type configuration.
" + }, + "mixedInstanceTypes":{ + "shape":"MixedInstanceTypes", + "documentation":"List the instance types within an EC2 Auto Scaling group that has mixed instance types.
" } }, - "documentation":"Describes the configuration of an Auto Scaling group.
" + "documentation":"Describes the configuration of an EC2 Auto Scaling group.
" }, "AutoScalingGroupEstimatedMonthlySavings":{ "type":"structure", @@ -1905,6 +1935,9 @@ "CurrentConfigurationDesiredCapacity", "CurrentConfigurationMinSize", "CurrentConfigurationMaxSize", + "CurrentConfigurationAllocationStrategy", + "CurrentConfigurationMixedInstanceTypes", + "CurrentConfigurationType", "CurrentOnDemandPrice", "CurrentStandardOneYearNoUpfrontReservedPrice", "CurrentStandardThreeYearNoUpfrontReservedPrice", @@ -1916,6 +1949,10 @@ "RecommendationOptionsConfigurationDesiredCapacity", "RecommendationOptionsConfigurationMinSize", "RecommendationOptionsConfigurationMaxSize", + "RecommendationOptionsConfigurationEstimatedInstanceHourReductionPercentage", + "RecommendationOptionsConfigurationAllocationStrategy", + "RecommendationOptionsConfigurationMixedInstanceTypes", + "RecommendationOptionsConfigurationType", "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum", "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum", "RecommendationOptionsPerformanceRisk", @@ -4223,9 +4260,16 @@ "exception":true, "synthetic":true }, + "MixedInstanceType":{"type":"string"}, + "MixedInstanceTypes":{ + "type":"list", + "member":{"shape":"MixedInstanceType"} + }, "NextToken":{"type":"string"}, "NullableCpu":{"type":"integer"}, + "NullableEstimatedInstanceHourReductionPercentage":{"type":"double"}, "NullableIOPS":{"type":"integer"}, + "NullableInstanceType":{"type":"string"}, "NullableMaxAllocatedStorage":{"type":"integer"}, "NullableMemory":{"type":"integer"}, "NullableMemoryReservation":{"type":"integer"}, diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index 1befa2930d..7bb6e8b8ae 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -275,7 +275,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":">Associates a set of proficiencies with a user.
" + "documentation":"Associates a set of proficiencies with a user.
" }, "BatchAssociateAnalyticsDataSet":{ "name":"BatchAssociateAnalyticsDataSet", @@ -499,7 +499,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If a version of the same flow content already exists, no new version is created and instead the existing version number is returned. If the FlowContentSha256
provided is different from the FlowContentSha256
of the $LATEST
published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign
.
Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If the FlowContentSha256
provided is different from the FlowContentSha256
of the $LATEST
published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign
.
Deletes the specified flow module.
" }, + "DeleteContactFlowVersion":{ + "name":"DeleteContactFlowVersion", + "http":{ + "method":"DELETE", + "requestUri":"/contact-flows/{InstanceId}/{ContactFlowId}/version/{ContactFlowVersion}" + }, + "input":{"shape":"DeleteContactFlowVersionRequest"}, + "output":{"shape":"DeleteContactFlowVersionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Deletes the particular version specified in flow version identifier.
" + }, "DeleteEmailAddress":{ "name":"DeleteEmailAddress", "http":{ @@ -1203,7 +1221,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website.
" + "documentation":"Deletes a queue.
" }, "DeleteQuickConnect":{ "name":"DeleteQuickConnect", @@ -1493,7 +1511,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"Describes the specified flow.
You can also create and update flows using the Amazon Connect Flow language.
Use the $SAVED
alias in the request to describe the SAVED
content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED
. After a flow is published, $SAVED
needs to be supplied to view saved content that has not been published.
In the response, Status indicates the flow status as either SAVED
or PUBLISHED
. The PUBLISHED
status will initiate validation on the content. SAVED
does not initiate validation of the content. SAVED
| PUBLISHED
Describes the specified flow.
You can also create and update flows using the Amazon Connect Flow language.
Use the $SAVED
alias in the request to describe the SAVED
content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED
. After a flow is published, $SAVED
needs to be supplied to view saved content that has not been published.
Use arn:aws:.../contact-flow/{id}:{version}
to retrieve the content of a specific flow version.
In the response, Status indicates the flow status as either SAVED
or PUBLISHED
. The PUBLISHED
status will initiate validation on the content. SAVED
does not initiate validation of the content. SAVED
| PUBLISHED
Indicates the checksum value of the flow content.
" + "documentation":"Indicates the checksum value of the latest published flow content.
" } } }, @@ -7863,6 +7882,10 @@ "shape":"FlowContentSha256", "documentation":"Indicates the checksum value of the flow content.
" }, + "ContactFlowVersion":{ + "shape":"ResourceVersion", + "documentation":"The identifier of the flow version.
" + }, "LastModifiedTime":{ "shape":"Timestamp", "documentation":"The Amazon Web Services Region where this resource was last modified.
" @@ -9617,6 +9640,39 @@ "members":{ } }, + "DeleteContactFlowVersionRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ContactFlowId", + "ContactFlowVersion" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
", + "location":"uri", + "locationName":"InstanceId" + }, + "ContactFlowId":{ + "shape":"ARN", + "documentation":"The identifier of the flow.
", + "location":"uri", + "locationName":"ContactFlowId" + }, + "ContactFlowVersion":{ + "shape":"ResourceVersion", + "documentation":"The identifier of the flow version.
", + "location":"uri", + "locationName":"ContactFlowVersion" + } + } + }, + "DeleteContactFlowVersionResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteEmailAddressRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/datasync/2018-11-09/service-2.json b/botocore/data/datasync/2018-11-09/service-2.json index 547f692751..0f74881887 100644 --- a/botocore/data/datasync/2018-11-09/service-2.json +++ b/botocore/data/datasync/2018-11-09/service-2.json @@ -211,7 +211,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Creates a transfer location for a Server Message Block (SMB) file server. DataSync can use this location as a source or destination for transferring data.
Before you begin, make sure that you understand how DataSync accesses SMB file servers.
" + "documentation":"Creates a transfer location for a Server Message Block (SMB) file server. DataSync can use this location as a source or destination for transferring data.
Before you begin, make sure that you understand how DataSync accesses SMB file servers. For more information, see Providing DataSync access to SMB file servers.
" }, "CreateTask":{ "name":"CreateTask", @@ -1679,30 +1679,28 @@ "required":[ "Subdirectory", "ServerHostname", - "User", - "Password", "AgentArns" ], "members":{ "Subdirectory":{ "shape":"SmbSubdirectory", - "documentation":"Specifies the name of the share exported by your SMB file server where DataSync will read or write data. You can include a subdirectory in the share path (for example, /path/to/subdirectory
). Make sure that other SMB clients in your network can also mount this path.
To copy all data in the subdirectory, DataSync must be able to mount the SMB share and access all of its data. For more information, see required permissions for SMB locations.
" + "documentation":"Specifies the name of the share exported by your SMB file server where DataSync will read or write data. You can include a subdirectory in the share path (for example, /path/to/subdirectory
). Make sure that other SMB clients in your network can also mount this path.
To copy all data in the subdirectory, DataSync must be able to mount the SMB share and access all of its data. For more information, see Providing DataSync access to SMB file servers.
" }, "ServerHostname":{ "shape":"ServerHostname", - "documentation":"Specifies the Domain Name Service (DNS) name or IP address of the SMB file server that your DataSync agent will mount.
You can't specify an IP version 6 (IPv6) address.
Specifies the domain name or IP address of the SMB file server that your DataSync agent will mount.
Remember the following when configuring this parameter:
You can't specify an IP version 6 (IPv6) address.
If you're using Kerberos authentication, you must specify a domain name.
Specifies the user that can mount and access the files, folders, and file metadata in your SMB file server.
For information about choosing a user with the right level of access for your transfer, see required permissions for SMB locations.
" + "documentation":"Specifies the user that can mount and access the files, folders, and file metadata in your SMB file server. This parameter applies only if AuthenticationType
is set to NTLM
.
For information about choosing a user with the right level of access for your transfer, see Providing DataSync access to SMB file servers.
" }, "Domain":{ "shape":"SmbDomain", - "documentation":"Specifies the name of the Active Directory domain that your SMB file server belongs to.
If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file server.
" + "documentation":"Specifies the Windows domain name that your SMB file server belongs to. This parameter applies only if AuthenticationType
is set to NTLM
.
If you have multiple domains in your environment, configuring this parameter makes sure that DataSync connects to the right file server.
" }, "Password":{ "shape":"SmbPassword", - "documentation":"Specifies the password of the user who can mount your SMB file server and has permission to access the files and folders involved in your transfer.
For more information, see required permissions for SMB locations.
" + "documentation":"Specifies the password of the user who can mount your SMB file server and has permission to access the files and folders involved in your transfer. This parameter applies only if AuthenticationType
is set to NTLM
.
Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.
" + }, + "AuthenticationType":{ + "shape":"SmbAuthenticationType", + "documentation":"Specifies the authentication protocol that DataSync uses to connect to your SMB file server. DataSync supports NTLM
(default) and KERBEROS
authentication.
Specifies the IPv4 addresses for the DNS servers that your SMB file server belongs to. This parameter applies only if AuthenticationType
is set to KERBEROS
.
If you have multiple domains in your environment, configuring this parameter makes sure that DataSync connects to the right SMB file server.
" + }, + "KerberosPrincipal":{ + "shape":"KerberosPrincipal", + "documentation":"Specifies a service principal name (SPN), which is an identity in your Kerberos realm that has permission to access the files, folders, and file metadata in your SMB file server.
SPNs are case sensitive and must include a prepended cifs/
. For example, an SPN might look like cifs/kerberosuser@EXAMPLE.COM
.
Your task execution will fail if the SPN that you provide for this parameter doesn’t match what’s exactly in your keytab or krb5.conf
files.
Specifies your Kerberos key table (keytab) file, which includes mappings between your service principal name (SPN) and encryption keys.
You can specify the keytab using a file path (for example, file://path/to/file.keytab
). The file must be base64 encoded. If you're using the CLI, the encoding is done for you.
To avoid task execution errors, make sure that the SPN in the keytab file matches exactly what you specify for KerberosPrincipal
and in your krb5.conf
file.
Specifies a Kerberos configuration file (krb5.conf
) that defines your Kerberos realm configuration.
You can specify the krb5.conf
using a file path (for example, file://path/to/krb5.conf
). The file must be base64 encoded. If you're using the CLI, the encoding is done for you.
To avoid task execution errors, make sure that the service principal name (SPN) in the krb5.conf
file matches exactly what you specify for KerberosPrincipal
and in your keytab file.
CreateLocationSmbRequest
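Putting the new SMB Kerberos members together, a hedged boto3 sketch of CreateLocationSmb. AuthenticationType, KerberosPrincipal, and DnsIpAddresses come from this hunk; the keytab and krb5.conf parameter names (KerberosKeytab, KerberosKrb5Conf) are not visible above and are an assumption, and every ARN, hostname, and path is a placeholder. In boto3 the two files are passed as raw bytes; the base64 note above applies to the CLI.

import boto3

datasync = boto3.client("datasync")

# Read the Kerberos artifacts as raw bytes; boto3 handles blob encoding on the wire.
with open("/path/to/file.keytab", "rb") as f:
    keytab = f.read()
with open("/path/to/krb5.conf", "rb") as f:
    krb5_conf = f.read()

datasync.create_location_smb(
    ServerHostname="fileserver.example.com",  # Kerberos requires a domain name, not an IP address
    Subdirectory="/share/subdirectory",
    AgentArns=["arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0"],
    AuthenticationType="KERBEROS",
    KerberosPrincipal="cifs/kerberosuser@EXAMPLE.COM",
    KerberosKeytab=keytab,       # assumed parameter name (not shown in this hunk)
    KerberosKrb5Conf=krb5_conf,  # assumed parameter name (not shown in this hunk)
    DnsIpAddresses=["10.0.0.2"],
)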
" @@ -2393,19 +2411,31 @@ }, "User":{ "shape":"SmbUser", - "documentation":"The user that can mount and access the files, folders, and file metadata in your SMB file server.
" + "documentation":"The user that can mount and access the files, folders, and file metadata in your SMB file server. This element applies only if AuthenticationType
is set to NTLM
.
The name of the Microsoft Active Directory domain that the SMB file server belongs to.
" + "documentation":"The name of the Windows domain that the SMB file server belongs to. This element applies only if AuthenticationType
is set to NTLM
.
The protocol that DataSync use to access your SMB file.
" + "documentation":"The SMB protocol version that DataSync uses to access your SMB file server.
" }, "CreationTime":{ "shape":"Time", "documentation":"The time that the SMB location was created.
" + }, + "DnsIpAddresses":{ + "shape":"DnsIpList", + "documentation":"The IPv4 addresses for the DNS servers that your SMB file server belongs to. This element applies only if AuthenticationType
is set to KERBEROS
.
The Kerberos service principal name (SPN) that has permission to access the files, folders, and file metadata in your SMB file server.
" + }, + "AuthenticationType":{ + "shape":"SmbAuthenticationType", + "documentation":"The authentication protocol that DataSync uses to connect to your SMB file server.
" } }, "documentation":"DescribeLocationSmbResponse
" @@ -2867,6 +2897,11 @@ "enum":["NetAppONTAP"] }, "DiscoveryTime":{"type":"timestamp"}, + "DnsIpList":{ + "type":"list", + "member":{"shape":"ServerIpAddress"}, + "max":2 + }, "Duration":{ "type":"long", "min":0 @@ -3074,7 +3109,7 @@ "type":"structure", "members":{ "Domain":{ - "shape":"FsxUpdateSmbDomain", + "shape":"UpdateSmbDomain", "documentation":"Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.
If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.
" }, "MountOptions":{"shape":"SmbMountOptions"}, @@ -3089,11 +3124,6 @@ }, "documentation":"Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system's storage virtual machine (SVM). For more information, see Providing DataSync access to FSx for ONTAP file systems.
" }, - "FsxUpdateSmbDomain":{ - "type":"string", - "max":253, - "pattern":"^([A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252})?$" - }, "FsxWindowsSubdirectory":{ "type":"string", "max":4096, @@ -3944,7 +3974,7 @@ "type":"string", "max":63, "min":3, - "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$" + "pattern":"^[a-zA-Z0-9_\\-\\+\\.\\(\\)\\$\\p{Zs}]+$" }, "ObjectStorageCertificate":{ "type":"blob", @@ -4523,6 +4553,19 @@ "max":255, "pattern":"^(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])$" }, + "ServerIpAddress":{ + "type":"string", + "max":15, + "min":7, + "pattern":"\\A(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}\\z" + }, + "SmbAuthenticationType":{ + "type":"string", + "enum":[ + "NTLM", + "KERBEROS" + ] + }, "SmbDomain":{ "type":"string", "max":253, @@ -4560,7 +4603,7 @@ "SmbUser":{ "type":"string", "max":104, - "pattern":"^[^\\x5B\\x5D\\\\/:;|=,+*?]{1,104}$" + "pattern":"^[^\\x22\\x5B\\x5D/\\\\:;|=,+*?\\x3C\\x3E]{1,104}$" }, "SmbVersion":{ "type":"string", @@ -5293,7 +5336,7 @@ "documentation":"Specifies a mount path for your file system using forward slashes. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location).
" }, "Domain":{ - "shape":"FsxUpdateSmbDomain", + "shape":"UpdateSmbDomain", "documentation":"Specifies the name of the Windows domain that your FSx for Windows File Server file system belongs to.
If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.
" }, "User":{ @@ -5472,25 +5515,45 @@ }, "Subdirectory":{ "shape":"SmbSubdirectory", - "documentation":"Specifies the name of the share exported by your SMB file server where DataSync will read or write data. You can include a subdirectory in the share path (for example, /path/to/subdirectory
). Make sure that other SMB clients in your network can also mount this path.
To copy all data in the specified subdirectory, DataSync must be able to mount the SMB share and access all of its data. For more information, see required permissions for SMB locations.
" + "documentation":"Specifies the name of the share exported by your SMB file server where DataSync will read or write data. You can include a subdirectory in the share path (for example, /path/to/subdirectory
). Make sure that other SMB clients in your network can also mount this path.
To copy all data in the specified subdirectory, DataSync must be able to mount the SMB share and access all of its data. For more information, see Providing DataSync access to SMB file servers.
" }, "User":{ "shape":"SmbUser", - "documentation":"Specifies the user name that can mount your SMB file server and has permission to access the files and folders involved in your transfer.
For information about choosing a user with the right level of access for your transfer, see required permissions for SMB locations.
" + "documentation":"Specifies the user name that can mount your SMB file server and has permission to access the files and folders involved in your transfer. This parameter applies only if AuthenticationType
is set to NTLM
.
For information about choosing a user with the right level of access for your transfer, see Providing DataSync access to SMB file servers.
" }, "Domain":{ "shape":"SmbDomain", - "documentation":"Specifies the Windows domain name that your SMB file server belongs to.
If you have multiple domains in your environment, configuring this parameter makes sure that DataSync connects to the right file server.
For more information, see required permissions for SMB locations.
" + "documentation":"Specifies the Windows domain name that your SMB file server belongs to. This parameter applies only if AuthenticationType
is set to NTLM
.
If you have multiple domains in your environment, configuring this parameter makes sure that DataSync connects to the right file server.
" }, "Password":{ "shape":"SmbPassword", - "documentation":"Specifies the password of the user who can mount your SMB file server and has permission to access the files and folders involved in your transfer.
For more information, see required permissions for SMB locations.
" + "documentation":"Specifies the password of the user who can mount your SMB file server and has permission to access the files and folders involved in your transfer. This parameter applies only if AuthenticationType
is set to NTLM
.
Specifies the DataSync agent (or agents) that can connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN).
" }, - "MountOptions":{"shape":"SmbMountOptions"} + "MountOptions":{"shape":"SmbMountOptions"}, + "AuthenticationType":{ + "shape":"SmbAuthenticationType", + "documentation":"Specifies the authentication protocol that DataSync uses to connect to your SMB file server. DataSync supports NTLM
(default) and KERBEROS
authentication.
Specifies the IPv4 addresses for the DNS servers that your SMB file server belongs to. This parameter applies only if AuthenticationType
is set to KERBEROS
.
If you have multiple domains in your environment, configuring this parameter makes sure that DataSync connects to the right SMB file server.
" + }, + "KerberosPrincipal":{ + "shape":"KerberosPrincipal", + "documentation":"Specifies a service principal name (SPN), which is an identity in your Kerberos realm that has permission to access the files, folders, and file metadata in your SMB file server.
SPNs are case sensitive and must include a prepended cifs/
. For example, an SPN might look like cifs/kerberosuser@EXAMPLE.COM
.
Your task execution will fail if the SPN that you provide for this parameter doesn’t match what’s exactly in your keytab or krb5.conf
files.
Specifies your Kerberos key table (keytab) file, which includes mappings between your service principal name (SPN) and encryption keys.
You can specify the keytab using a file path (for example, file://path/to/file.keytab
). The file must be base64 encoded. If you're using the CLI, the encoding is done for you.
To avoid task execution errors, make sure that the SPN in the keytab file matches exactly what you specify for KerberosPrincipal
and in your krb5.conf
file.
Specifies a Kerberos configuration file (krb5.conf
) that defines your Kerberos realm configuration.
You can specify the krb5.conf
using a file path (for example, file://path/to/krb5.conf
). The file must be base64 encoded. If you're using the CLI, the encoding is done for you.
To avoid task execution errors, make sure that the service principal name (SPN) in the krb5.conf
file matches exactly what you specify for KerberosPrincipal
and in your keytab file.
Creates a job. A job is a set of instructions that AWS Deadline Cloud uses to schedule and run work on available workers. For more information, see Deadline Cloud jobs.
", + "documentation":"Creates a job. A job is a set of instructions that Deadline Cloud uses to schedule and run work on available workers. For more information, see Deadline Cloud jobs.
", "endpoint":{"hostPrefix":"management."}, "idempotent":true }, @@ -337,6 +337,27 @@ "endpoint":{"hostPrefix":"management."}, "idempotent":true }, + "CreateLimit":{ + "name":"CreateLimit", + "http":{ + "method":"POST", + "requestUri":"/2023-10-12/farms/{farmId}/limits", + "responseCode":200 + }, + "input":{"shape":"CreateLimitRequest"}, + "output":{"shape":"CreateLimitResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"Creates a limit that manages the distribution of shared resources, such as floating licenses. A limit can throttle work assignments, help manage workloads, and track current usage. Before you use a limit, you must associate the limit with one or more queues.
You must add the amountRequirementName
to a step in a job template to declare the limit requirement.
Associates a limit with a particular queue. After the limit is associated, all workers for jobs that specify the limit associated with the queue are subject to the limit. You can't associate two limits with the same amountRequirementName
to the same queue.
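A hedged boto3 sketch of the two operations above: create a limit in a farm and associate it with a queue. The identifiers and the amount requirement name are placeholders, and the method names are the snake_case forms boto3 derives from the operations shown here.

import boto3

deadline = boto3.client("deadline")

farm_id = "farm-0123456789abcdef0123456789abcdef"    # placeholder
queue_id = "queue-0123456789abcdef0123456789abcdef"  # placeholder

# Create a limit that models, for example, a pool of floating licenses.
limit = deadline.create_limit(
    farmId=farm_id,
    displayName="Floating renderer licenses",
    amountRequirementName="amount.floatingLicenses",  # referenced from step hostRequirements
    maxCount=25,                                      # -1 would mean unlimited; 0 is not allowed
)

# The limit has no effect until it is associated with at least one queue.
deadline.create_queue_limit_association(
    farmId=farm_id,
    queueId=queue_id,
    limitId=limit["limitId"],
)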
Removes a limit from the specified farm. Before you delete a limit you must use the DeleteQueueLimitAssociation
operation to remove the association with any queues.
Removes the association between a queue and a limit. You must use the UpdateQueueLimitAssociation
operation to set the status to STOP_LIMIT_USAGE_AND_COMPLETE_TASKS
or STOP_LIMIT_USAGE_AND_CANCEL_TASKS
. The status does not change immediately. Use the GetQueueLimitAssociation
operation to see if the status changed to STOPPED
before deleting the association.
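The deletion constraints above translate into a stop, poll, delete sequence. A minimal sketch with placeholder identifiers, assuming the association eventually reports STOPPED:

import time
import boto3

deadline = boto3.client("deadline")

farm_id = "farm-0123456789abcdef0123456789abcdef"    # placeholder
queue_id = "queue-0123456789abcdef0123456789abcdef"  # placeholder
limit_id = "limit-0123456789abcdef0123456789abcdef"  # placeholder

# Stop handing out the limit but let tasks that already hold it run to completion.
deadline.update_queue_limit_association(
    farmId=farm_id, queueId=queue_id, limitId=limit_id,
    status="STOP_LIMIT_USAGE_AND_COMPLETE_TASKS",
)

# The status change is not immediate; poll until the association reports STOPPED.
while deadline.get_queue_limit_association(
        farmId=farm_id, queueId=queue_id, limitId=limit_id)["status"] != "STOPPED":
    time.sleep(15)

deadline.delete_queue_limit_association(farmId=farm_id, queueId=queue_id, limitId=limit_id)
deadline.delete_limit(farmId=farm_id, limitId=limit_id)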
Gets a license endpoint.
", "endpoint":{"hostPrefix":"management."} }, + "GetLimit":{ + "name":"GetLimit", + "http":{ + "method":"GET", + "requestUri":"/2023-10-12/farms/{farmId}/limits/{limitId}", + "responseCode":200 + }, + "input":{"shape":"GetLimitRequest"}, + "output":{"shape":"GetLimitResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"Gets information about a specific limit.
", + "endpoint":{"hostPrefix":"management."} + }, "GetMonitor":{ "name":"GetMonitor", "http":{ @@ -937,6 +1037,25 @@ "documentation":"Gets a queue-fleet association.
", "endpoint":{"hostPrefix":"management."} }, + "GetQueueLimitAssociation":{ + "name":"GetQueueLimitAssociation", + "http":{ + "method":"GET", + "requestUri":"/2023-10-12/farms/{farmId}/queue-limit-associations/{queueId}/{limitId}", + "responseCode":200 + }, + "input":{"shape":"GetQueueLimitAssociationRequest"}, + "output":{"shape":"GetQueueLimitAssociationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"Gets information about a specific association between a queue and a limit.
", + "endpoint":{"hostPrefix":"management."} + }, "GetSession":{ "name":"GetSession", "http":{ @@ -1275,6 +1394,25 @@ "documentation":"Lists license endpoints.
", "endpoint":{"hostPrefix":"management."} }, + "ListLimits":{ + "name":"ListLimits", + "http":{ + "method":"GET", + "requestUri":"/2023-10-12/farms/{farmId}/limits", + "responseCode":200 + }, + "input":{"shape":"ListLimitsRequest"}, + "output":{"shape":"ListLimitsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"Gets a list of limits defined in the specified farm.
", + "endpoint":{"hostPrefix":"management."} + }, "ListMeteredProducts":{ "name":"ListMeteredProducts", "http":{ @@ -1349,6 +1487,24 @@ "documentation":"Lists queue-fleet associations.
", "endpoint":{"hostPrefix":"management."} }, + "ListQueueLimitAssociations":{ + "name":"ListQueueLimitAssociations", + "http":{ + "method":"GET", + "requestUri":"/2023-10-12/farms/{farmId}/queue-limit-associations", + "responseCode":200 + }, + "input":{"shape":"ListQueueLimitAssociationsRequest"}, + "output":{"shape":"ListQueueLimitAssociationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Gets a list of the associations between queues and limits defined in a farm.
", + "endpoint":{"hostPrefix":"management."} + }, "ListQueueMembers":{ "name":"ListQueueMembers", "http":{ @@ -1834,6 +1990,26 @@ "endpoint":{"hostPrefix":"management."}, "idempotent":true }, + "UpdateLimit":{ + "name":"UpdateLimit", + "http":{ + "method":"PATCH", + "requestUri":"/2023-10-12/farms/{farmId}/limits/{limitId}", + "responseCode":200 + }, + "input":{"shape":"UpdateLimitRequest"}, + "output":{"shape":"UpdateLimitResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"Updates the properties of the specified limit.
", + "endpoint":{"hostPrefix":"management."}, + "idempotent":true + }, "UpdateMonitor":{ "name":"UpdateMonitor", "http":{ @@ -1913,6 +2089,26 @@ "endpoint":{"hostPrefix":"management."}, "idempotent":true }, + "UpdateQueueLimitAssociation":{ + "name":"UpdateQueueLimitAssociation", + "http":{ + "method":"PATCH", + "requestUri":"/2023-10-12/farms/{farmId}/queue-limit-associations/{queueId}/{limitId}", + "responseCode":200 + }, + "input":{"shape":"UpdateQueueLimitAssociationRequest"}, + "output":{"shape":"UpdateQueueLimitAssociationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"Updates the status of the queue. If you set the status to one of the STOP_LIMIT_USAGE*
values, there will be a delay before the status transitions to the STOPPED
state.
A list of objects that contain the GPU name of the accelerator and driver for the instance types that support the accelerator.
" + "documentation":"A list of accelerator capabilities requested for this fleet. Only Amazon Elastic Compute Cloud instances that provide these capabilities will be used. For example, if you specify both L4 and T4 chips, Deadline Cloud will use Amazon EC2 instances that have either the L4 or the T4 chip installed.
" }, "count":{ "shape":"AcceleratorCountRange", - "documentation":"The number of GPUs on each worker. The default is 1.
" + "documentation":"The number of GPU accelerators specified for worker hosts in this fleet.
" } }, - "documentation":"Provides information about the GPU accelerators and drivers for the instance types in a fleet. If you include the acceleratorCapabilities
property in the ServiceManagedEc2InstanceCapabilities object, all of the Amazon EC2 instances will have at least one accelerator.
Provides information about the GPU accelerators used for jobs processed by a fleet.
" }, "AcceleratorCountRange":{ "type":"structure", @@ -2061,14 +2257,14 @@ "members":{ "min":{ "shape":"MinZeroMaxInteger", - "documentation":"The minimum number of GPUs for the accelerator. If you set the value to 0, a worker will still have 1 GPU.
" + "documentation":"The minimum number of GPU accelerators in the worker host.
" }, "max":{ "shape":"MinZeroMaxInteger", - "documentation":"The maximum number of GPUs for the accelerator.
" + "documentation":"The maximum number of GPU accelerators in the worker host.
" } }, - "documentation":"The range for the GPU fleet acceleration.
" + "documentation":"Defines the maximum and minimum number of GPU accelerators required for a worker instance..
" }, "AcceleratorName":{ "type":"string", @@ -2090,14 +2286,14 @@ "members":{ "name":{ "shape":"AcceleratorName", - "documentation":"The name of the GPU accelerator.
" + "documentation":"The name of the chip used by the GPU accelerator.
If you specify l4
as the name of the accelerator, you must specify latest
or grid:r550
as the runtime.
The available GPU accelerators are:
t4
- NVIDIA T4 Tensor Core GPU
a10g
- NVIDIA A10G Tensor Core GPU
l4
- NVIDIA L4 Tensor Core GPU
l40s
- NVIDIA L40S Tensor Core GPU
The driver version that the GPU accelerator uses.
" + "documentation":"Specifies the runtime driver to use for the GPU accelerator. You must use the same runtime for all GPUs.
You can choose from the following runtimes:
latest
- Use the latest runtime available for the chip. If you specify latest
and a new version of the runtime is released, the new version of the runtime is used.
grid:r550
- NVIDIA vGPU software 17
grid:r535
- NVIDIA vGPU software 16
If you don't specify a runtime, Deadline Cloud uses latest
as the default. However, if you have multiple accelerators and specify latest
for some and leave others blank, Deadline Cloud raises an exception.
Values that you can use to select a particular Amazon EC2 instance type.
" + "documentation":"Describes a specific GPU accelerator required for an Amazon Elastic Compute Cloud worker host.
" }, "AcceleratorSelections":{ "type":"list", @@ -2116,7 +2312,7 @@ "documentation":"The maximum amount of memory to use for the accelerator, measured in MiB.
" } }, - "documentation":"The range for memory, in MiB, to use for the accelerator.
" + "documentation":"Defines the maximum and minimum amount of memory, in MiB, to use for the accelerator.
" }, "AcceleratorType":{ "type":"string", @@ -2147,6 +2343,28 @@ "type":"string", "sensitive":true }, + "AcquiredLimit":{ + "type":"structure", + "required":[ + "limitId", + "count" + ], + "members":{ + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit.
" + }, + "count":{ + "shape":"MinOneMaxInteger", + "documentation":"The number of limit resources used.
" + } + }, + "documentation":"Provides information about the number of resources used.
" + }, + "AcquiredLimits":{ + "type":"list", + "member":{"shape":"AcquiredLimit"} + }, "AggregationId":{ "type":"string", "pattern":"[0-9a-f]{32}" @@ -2163,6 +2381,11 @@ "min":1, "pattern":"([a-zA-Z][a-zA-Z0-9]{0,63}:)?amount(\\.[a-zA-Z][a-zA-Z0-9]{0,63})+" }, + "AmountRequirementName":{ + "type":"string", + "max":1024, + "min":0 + }, "AssignedEnvironmentEnterSessionActionDefinition":{ "type":"structure", "required":["environmentId"], @@ -3328,6 +3551,10 @@ "shape":"MaxRetriesPerTask", "documentation":"The maximum number of retries for each task.
" }, + "maxWorkerCount":{ + "shape":"MaxWorkerCount", + "documentation":"The maximum number of worker hosts that can concurrently process a job. When the maxWorkerCount
is reached, no more workers will be assigned to process the job, even if the fleets assigned to the job's queue have available workers.
You can't set the maxWorkerCount
to 0. If you set it to -1, there is no maximum number of workers.
If you don't specify the maxWorkerCount
, Deadline Cloud won't throttle the number of workers used to process the job.
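A hedged boto3 sketch of passing the new maxWorkerCount when submitting a job; the identifiers are placeholders, the job template is assumed to be prepared elsewhere, and other CreateJob options are omitted.

import boto3

deadline = boto3.client("deadline")

job_template = "..."  # an Open Job Description template document prepared elsewhere

response = deadline.create_job(
    farmId="farm-0123456789abcdef0123456789abcdef",    # placeholder
    queueId="queue-0123456789abcdef0123456789abcdef",  # placeholder
    template=job_template,
    templateType="JSON",
    priority=50,
    maxWorkerCount=10,  # cap the job at 10 concurrent workers; -1 removes the cap, 0 is invalid
)
print(response["jobId"])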
The job ID for the source job.
" @@ -3406,6 +3633,56 @@ } } }, + "CreateLimitRequest":{ + "type":"structure", + "required":[ + "displayName", + "amountRequirementName", + "maxCount", + "farmId" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"The unique token which the server uses to recognize retries of the same request.
", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amz-Client-Token" + }, + "displayName":{ + "shape":"ResourceName", + "documentation":"The display name of the limit.
This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.
The value that you specify as the name
in the amounts
field of the hostRequirements
in a step of a job template to declare the limit requirement.
The maximum number of resources constrained by this limit. When all of the resources are in use, steps that require the limit won't be scheduled until the resource is available.
The maxCount
must not be 0. If the value is -1, there is no restriction on the number of resources that can be acquired for this limit.
The farm ID of the farm that contains the limit.
", + "location":"uri", + "locationName":"farmId" + }, + "description":{ + "shape":"Description", + "documentation":"A description of the limit. A description helps you identify the purpose of the limit.
This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.
A unique identifier for the limit. Use this identifier in other operations, such as CreateQueueLimitAssociation
and DeleteLimit
.
The unique identifier of the farm that contains the queue and limit to associate.
", + "location":"uri", + "locationName":"farmId" + }, + "queueId":{ + "shape":"QueueId", + "documentation":"The unique identifier of the queue to associate with the limit.
" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit to associate with the queue.
" + } + } + }, + "CreateQueueLimitAssociationResponse":{ + "type":"structure", + "members":{ + } + }, "CreateQueueRequest":{ "type":"structure", "required":[ @@ -3913,6 +4219,32 @@ "members":{ } }, + "DeleteLimitRequest":{ + "type":"structure", + "required":[ + "farmId", + "limitId" + ], + "members":{ + "farmId":{ + "shape":"FarmId", + "documentation":"The unique identifier of the farm that contains the limit to delete.
", + "location":"uri", + "locationName":"farmId" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit to delete.
", + "location":"uri", + "locationName":"limitId" + } + } + }, + "DeleteLimitResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteMeteredProductRequest":{ "type":"structure", "required":[ @@ -4022,6 +4354,39 @@ "members":{ } }, + "DeleteQueueLimitAssociationRequest":{ + "type":"structure", + "required":[ + "farmId", + "queueId", + "limitId" + ], + "members":{ + "farmId":{ + "shape":"FarmId", + "documentation":"The unique identifier of the farm that contains the queue and limit to disassociate.
", + "location":"uri", + "locationName":"farmId" + }, + "queueId":{ + "shape":"QueueId", + "documentation":"The unique identifier of the queue to disassociate.
", + "location":"uri", + "locationName":"queueId" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit to disassociate.
", + "location":"uri", + "locationName":"limitId" + } + } + }, + "DeleteQueueLimitAssociationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteQueueRequest":{ "type":"structure", "required":[ @@ -4288,10 +4653,7 @@ "members":{ } }, - "DnsName":{ - "type":"string", - "pattern":"vpce-[\\w]+-[\\w]+.vpce-svc-[\\w]+.*.vpce.amazonaws.com" - }, + "DnsName":{"type":"string"}, "Document":{ "type":"structure", "members":{ @@ -5165,8 +5527,8 @@ "type":"structure", "required":[ "farmId", - "jobId", - "queueId" + "queueId", + "jobId" ], "members":{ "farmId":{ @@ -5175,17 +5537,17 @@ "location":"uri", "locationName":"farmId" }, - "jobId":{ - "shape":"JobId", - "documentation":"The job ID.
", - "location":"uri", - "locationName":"jobId" - }, "queueId":{ "shape":"QueueId", "documentation":"The queue ID associated with the job.
", "location":"uri", "locationName":"queueId" + }, + "jobId":{ + "shape":"JobId", + "documentation":"The job ID.
", + "location":"uri", + "locationName":"jobId" } } }, @@ -5281,6 +5643,10 @@ "shape":"JobDescription", "documentation":"The description of the job.
This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.
The maximum number of worker hosts that can concurrently process a job. When the maxWorkerCount
is reached, no more workers will be assigned to process the job, even if the fleets assigned to the job's queue have available workers.
If you don't set the maxWorkerCount
when you create a job, this value is not returned in the response.
The job ID for the source job.
" @@ -5349,6 +5715,86 @@ "max":10, "min":1 }, + "GetLimitRequest":{ + "type":"structure", + "required":[ + "farmId", + "limitId" + ], + "members":{ + "farmId":{ + "shape":"FarmId", + "documentation":"The unique identifier of the farm that contains the limit.
", + "location":"uri", + "locationName":"farmId" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit to return.
", + "location":"uri", + "locationName":"limitId" + } + } + }, + "GetLimitResponse":{ + "type":"structure", + "required":[ + "displayName", + "amountRequirementName", + "maxCount", + "createdAt", + "createdBy", + "farmId", + "limitId", + "currentCount" + ], + "members":{ + "displayName":{ + "shape":"ResourceName", + "documentation":"The display name of the limit.
This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.
The value that you specify as the name
in the amounts
field of the hostRequirements
in a step of a job template to declare the limit requirement.
The maximum number of resources constrained by this limit. When all of the resources are in use, steps that require the limit won't be scheduled until the resource is available.
The maxCount
must not be 0. If the value is -1, there is no restriction on the number of resources that can be acquired for this limit.
The Unix timestamp of the date and time that the limit was created.
" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"The user identifier of the person that created the limit.
" + }, + "updatedAt":{ + "shape":"UpdatedAt", + "documentation":"The Unix timestamp of the date and time that the limit was last updated.
" + }, + "updatedBy":{ + "shape":"UpdatedBy", + "documentation":"The user identifier of the person that last updated the limit.
" + }, + "farmId":{ + "shape":"FarmId", + "documentation":"The unique identifier of the farm that contains the limit.
" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit.
" + }, + "currentCount":{ + "shape":"MinZeroMaxInteger", + "documentation":"The number of resources from the limit that are being used by jobs. The result is delayed and may not be the count at the time that you called the operation.
" + }, + "description":{ + "shape":"Description", + "documentation":"The description of the limit that helps identify what the limit is used for.
This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.
The unique identifier of the farm that contains the associated queue and limit.
", + "location":"uri", + "locationName":"farmId" + }, + "queueId":{ + "shape":"QueueId", + "documentation":"The unique identifier of the queue associated with the limit.
", + "location":"uri", + "locationName":"queueId" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit associated with the queue.
", + "location":"uri", + "locationName":"limitId" + } + } + }, + "GetQueueLimitAssociationResponse":{ + "type":"structure", + "required":[ + "createdAt", + "createdBy", + "queueId", + "limitId", + "status" + ], + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"The Unix timestamp of the date and time that the association was created.
" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"The user identifier of the person that created the association.
" + }, + "updatedAt":{ + "shape":"UpdatedAt", + "documentation":"The Unix timestamp of the date and time that the association was last updated.
" + }, + "updatedBy":{ + "shape":"UpdatedBy", + "documentation":"The user identifier of the person that last updated the association.
" + }, + "queueId":{ + "shape":"QueueId", + "documentation":"The unique identifier of the queue associated with the limit.
" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit associated with the queue.
" + }, + "status":{ + "shape":"QueueLimitAssociationStatus", + "documentation":"The current status of the limit.
" + } + } + }, "GetQueueRequest":{ "type":"structure", "required":[ @@ -5749,6 +6263,10 @@ "definition":{ "shape":"SessionActionDefinition", "documentation":"The session action definition.
" + }, + "acquiredLimits":{ + "shape":"AcquiredLimits", + "documentation":"The limits and their amounts acquired during a session action. If no limits were acquired during the session, this field isn't returned.
" } } }, @@ -6271,18 +6789,14 @@ "GetWorkerResponse":{ "type":"structure", "required":[ - "workerId", "farmId", "fleetId", + "workerId", "status", "createdAt", "createdBy" ], "members":{ - "workerId":{ - "shape":"WorkerId", - "documentation":"The worker ID.
" - }, "farmId":{ "shape":"FarmId", "documentation":"The farm ID.
" @@ -6291,6 +6805,10 @@ "shape":"FleetId", "documentation":"The fleet ID.
" }, + "workerId":{ + "shape":"WorkerId", + "documentation":"The worker ID.
" + }, "hostProperties":{ "shape":"HostPropertiesResponse", "documentation":"The host properties for the worker.
" @@ -6878,6 +7396,10 @@ "shape":"JobParameters", "documentation":"The job parameters.
" }, + "maxWorkerCount":{ + "shape":"MaxWorkerCount", + "documentation":"The maximum number of worker hosts that can concurrently process a job. When the maxWorkerCount
is reached, no more workers will be assigned to process the job, even if the fleets assigned to the job's queue have available workers.
You can't set the maxWorkerCount
to 0. If you set it to -1, there is no maximum number of workers.
If you don't specify the maxWorkerCount
, the default is -1.
The job ID for the source job.
" @@ -6965,6 +7487,10 @@ "shape":"MaxRetriesPerTask", "documentation":"The maximum number of retries for a job.
" }, + "maxWorkerCount":{ + "shape":"MaxWorkerCount", + "documentation":"The maximum number of worker hosts that can concurrently process a job. When the maxWorkerCount
is reached, no more workers will be assigned to process the job, even if the fleets assigned to the job's queue have available workers.
You can't set the maxWorkerCount
to 0. If you set it to -1, there is no maximum number of workers.
If you don't specify the maxWorkerCount
, the default is -1.
The job ID for the source job.
" @@ -7040,6 +7566,70 @@ "documentation":"The details for a license endpoint.
" }, "LicenseProduct":{"type":"string"}, + "LimitId":{ + "type":"string", + "pattern":"limit-[0-9a-f]{32}" + }, + "LimitSummaries":{ + "type":"list", + "member":{"shape":"LimitSummary"} + }, + "LimitSummary":{ + "type":"structure", + "required":[ + "displayName", + "amountRequirementName", + "maxCount", + "createdAt", + "createdBy", + "farmId", + "limitId", + "currentCount" + ], + "members":{ + "displayName":{ + "shape":"ResourceName", + "documentation":"The name of the limit used in lists to identify the limit.
This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.
The value that you specify as the name
in the amounts
field of the hostRequirements
in a step of a job template to declare the limit requirement.
The maximum number of resources constrained by this limit. When all of the resources are in use, steps that require the limit won't be scheduled until the resource is available.
The maxCount
must not be 0. If the value is -1, there is no restriction on the number of resources that can be acquired for this limit.
The Unix timestamp of the date and time that the limit was created.
" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"The user identifier of the person that created the limit.
" + }, + "updatedAt":{ + "shape":"UpdatedAt", + "documentation":"The Unix timestamp of the date and time that the limit was last updated.
" + }, + "updatedBy":{ + "shape":"UpdatedBy", + "documentation":"The user identifier of the person that last updated the limit.
" + }, + "farmId":{ + "shape":"FarmId", + "documentation":"The unique identifier of the farm that contains the limit.
" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit.
" + }, + "currentCount":{ + "shape":"MinZeroMaxInteger", + "documentation":"The number of resources from the limit that are being used by jobs. The result is delayed and may not be the count at the time that you called the operation.
" + } + }, + "documentation":"Provides information about a specific limit.
" + }, "ListAttributeCapabilityValue":{ "type":"list", "member":{"shape":"AttributeCapabilityValue"} @@ -7489,6 +8079,44 @@ } } }, + "ListLimitsRequest":{ + "type":"structure", + "required":["farmId"], + "members":{ + "farmId":{ + "shape":"FarmId", + "documentation":"The unique identifier of the farm that contains the limits.
", + "location":"uri", + "locationName":"farmId" + }, + "nextToken":{ + "shape":"String", + "documentation":"The token for the next set of results, or null
to start from the beginning.
The maximum number of limits to return in each page of results.
", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListLimitsResponse":{ + "type":"structure", + "required":["limits"], + "members":{ + "limits":{ + "shape":"LimitSummaries", + "documentation":"A list of limits that the farm contains.
" + }, + "nextToken":{ + "shape":"String", + "documentation":"If Deadline Cloud returns nextToken
, then there are more results available. The value of nextToken
is a unique pagination token for each page. To retrieve the next page, call the operation again using the returned token. Keep all other arguments unchanged. If no results remain, then nextToken
is set to null
. Each pagination token expires after 24 hours. If you provide a token that isn't valid, then you receive an HTTP 400 ValidationException
error.
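A small sketch of the nextToken loop described above for ListLimits, with a placeholder farm ID; each page is merged into one list and the currentCount and maxCount fields are printed.

import boto3

deadline = boto3.client("deadline")
farm_id = "farm-0123456789abcdef0123456789abcdef"  # placeholder

limits, token = [], None
while True:
    kwargs = {"farmId": farm_id, "maxResults": 50}
    if token:
        kwargs["nextToken"] = token
    page = deadline.list_limits(**kwargs)
    limits.extend(page["limits"])
    token = page.get("nextToken")
    if not token:  # nextToken is absent once all results have been returned
        break

for limit in limits:
    print(limit["limitId"], limit["displayName"],
          f'{limit["currentCount"]}/{limit["maxCount"]} in use')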
The unique identifier of the farm that contains the limits and associations.
", + "location":"uri", + "locationName":"farmId" + }, + "queueId":{ + "shape":"QueueId", + "documentation":"Specifies that the operation should return only the queue limit associations for the specified queue. If you specify both the queueId
and the limitId
, only the specified limit is returned if it exists.
Specifies that the operation should return only the queue limit associations for the specified limit. If you specify both the queueId
and the limitId
, only the specified limit is returned if it exists.
The token for the next set of results, or null
to start from the beginning.
The maximum number of associations to return in each page of results.
", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListQueueLimitAssociationsResponse":{ + "type":"structure", + "required":["queueLimitAssociations"], + "members":{ + "queueLimitAssociations":{ + "shape":"QueueLimitAssociationSummaries", + "documentation":"A list of associations between limits and queues in the farm specified in the request.
" + }, + "nextToken":{ + "shape":"String", + "documentation":"If Deadline Cloud returns nextToken
, then there are more results available. The value of nextToken
is a unique pagination token for each page. To retrieve the next page, call the operation again using the returned token. Keep all other arguments unchanged. If no results remain, then nextToken
is set to null
. Each pagination token expires after 24 hours. If you provide a token that isn't valid, then you receive an HTTP 400 ValidationException
error.
The has value of the file.
" + "documentation":"The hash value of the file.
" } }, "documentation":"The details of the manifest that links a job's source information.
", @@ -8438,6 +9116,12 @@ "max":1024, "min":1 }, + "MaxCount":{ + "type":"integer", + "box":true, + "max":2147483647, + "min":-1 + }, "MaxFailedTasksCount":{ "type":"integer", "box":true, @@ -8456,6 +9140,12 @@ "max":2147483647, "min":0 }, + "MaxWorkerCount":{ + "type":"integer", + "box":true, + "max":2147483647, + "min":-1 + }, "MembershipLevel":{ "type":"string", "enum":[ @@ -8522,6 +9212,12 @@ "type":"list", "member":{"shape":"MeteredProductSummary"} }, + "MinOneMaxInteger":{ + "type":"integer", + "box":true, + "max":2147483647, + "min":1 + }, "MinOneMaxTenThousand":{ "type":"integer", "box":true, @@ -8912,6 +9608,60 @@ "type":"string", "pattern":"queue-[0-9a-f]{32}" }, + "QueueLimitAssociationStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "STOP_LIMIT_USAGE_AND_COMPLETE_TASKS", + "STOP_LIMIT_USAGE_AND_CANCEL_TASKS", + "STOPPED" + ] + }, + "QueueLimitAssociationSummaries":{ + "type":"list", + "member":{"shape":"QueueLimitAssociationSummary"} + }, + "QueueLimitAssociationSummary":{ + "type":"structure", + "required":[ + "createdAt", + "createdBy", + "queueId", + "limitId", + "status" + ], + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"The Unix timestamp of the date and time that the association was created.
" + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"The user identifier of the person that created the association.
" + }, + "updatedAt":{ + "shape":"UpdatedAt", + "documentation":"The Unix timestamp of the date and time that the association was last updated.
" + }, + "updatedBy":{ + "shape":"UpdatedBy", + "documentation":"The user identifier of the person that updated the association.
" + }, + "queueId":{ + "shape":"QueueId", + "documentation":"The unique identifier of the queue in the association.
" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit in the association.
" + }, + "status":{ + "shape":"QueueLimitAssociationStatus", + "documentation":"The status of task scheduling in the queue-limit association.
ACTIVE
- Association is active.
STOP_LIMIT_USAGE_AND_COMPLETE_TASKS
- Association has stopped scheduling new tasks and is completing current tasks.
STOP_LIMIT_USAGE_AND_CANCEL_TASKS
- Association has stopped scheduling new tasks and is canceling current tasks.
STOPPED
- Association has been stopped.
Provides information about the association between a queue and a limit.
" + }, "QueueMember":{ "type":"structure", "required":[ @@ -9587,7 +10337,7 @@ }, "acceleratorCapabilities":{ "shape":"AcceleratorCapabilities", - "documentation":"The GPU accelerator capabilities required for the Amazon EC2 instances. If you include the acceleratorCapabilities
property in the ServiceManagedEc2InstanceCapabilities object, all of the Amazon EC2 instances will have at least one accelerator.
Describes the GPU accelerator capabilities required for worker host instances in this fleet.
" }, "allowedInstanceTypes":{ "shape":"InstanceTypes", @@ -11085,6 +11835,30 @@ "location":"header", "locationName":"X-Amz-Client-Token" }, + "targetTaskRunStatus":{ + "shape":"JobTargetTaskRunStatus", + "documentation":"The task status to update the job's tasks to.
" + }, + "priority":{ + "shape":"JobPriority", + "documentation":"The job priority to update.
" + }, + "maxFailedTasksCount":{ + "shape":"MaxFailedTasksCount", + "documentation":"The number of task failures before the job stops running and is marked as FAILED
.
The maximum number of retries for a job.
" + }, + "lifecycleStatus":{ + "shape":"UpdateJobLifecycleStatus", + "documentation":"The status of a job in its lifecycle. When you change the status of the job to ARCHIVED
, the job can't be scheduled or archived.
An archived job and its steps and tasks are deleted after 120 days. The job can't be recovered.
The maximum number of worker hosts that can concurrently process a job. When the maxWorkerCount
is reached, no more workers will be assigned to process the job, even if the fleets assigned to the job's queue have available workers.
You can't set the maxWorkerCount
to 0. If you set it to -1, there is no maximum number of workers.
If you don't specify the maxWorkerCount
, the default is -1.
The maximum number of workers that can process tasks in the job.
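As a sketch of the job-level worker cap described above, assuming UpdateJob accepts a maxWorkerCount value as this documentation states (the IDs are placeholders):

    import botocore.session

    deadline = botocore.session.get_session().create_client("deadline")
    # Cap the job at 10 concurrent workers; -1 removes the cap and 0 is rejected.
    deadline.update_job(
        farmId="farm-0123456789abcdef0123456789abcdef",
        queueId="queue-0123456789abcdef0123456789abcdef",
        jobId="job-0123456789abcdef0123456789abcdef",
        maxWorkerCount=10,
    )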
" + }, "farmId":{ "shape":"FarmId", "documentation":"The farm ID of the job to update.
", @@ -11102,30 +11876,48 @@ "documentation":"The job ID to update.
", "location":"uri", "locationName":"jobId" + } + } + }, + "UpdateJobResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLimitRequest":{ + "type":"structure", + "required":[ + "farmId", + "limitId" + ], + "members":{ + "farmId":{ + "shape":"FarmId", + "documentation":"The unique identifier of the farm that contains the limit.
", + "location":"uri", + "locationName":"farmId" }, - "targetTaskRunStatus":{ - "shape":"JobTargetTaskRunStatus", - "documentation":"The task status to update the job's tasks to.
" - }, - "priority":{ - "shape":"JobPriority", - "documentation":"The job priority to update.
" + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit to update.
", + "location":"uri", + "locationName":"limitId" }, - "maxFailedTasksCount":{ - "shape":"MaxFailedTasksCount", - "documentation":"The number of task failures before the job stops running and is marked as FAILED
.
The new display name of the limit.
This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.
The maximum number of retries for a job.
" + "description":{ + "shape":"Description", + "documentation":"The new description of the limit.
This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.
The status of a job in its lifecycle. When you change the status of the job to ARCHIVED
, the job can't be scheduled or archived.
An archived job and its steps and tasks are deleted after 120 days. The job can't be recovered.
The maximum number of resources constrained by this limit. When all of the resources are in use, steps that require the limit won't be scheduled until the resource is available.
If more than the new maximum number is currently in use, running jobs finish but no new jobs are started until the number of resources in use is below the new maximum number.
The maxCount
must not be 0. If the value is -1, there is no restriction on the number of resources that can be acquired for this limit.
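A matching UpdateLimit sketch, assuming the request carries the maxCount value described here (placeholder IDs):

    import botocore.session

    deadline = botocore.session.get_session().create_client("deadline")
    # Allow up to 25 resources to be acquired against this limit; -1 means unlimited.
    deadline.update_limit(
        farmId="farm-0123456789abcdef0123456789abcdef",
        limitId="limit-0123456789abcdef0123456789abcdef",
        maxCount=25,
    )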
The unique identifier of the farm that contains the associated queues and limits.
", + "location":"uri", + "locationName":"farmId" + }, + "queueId":{ + "shape":"QueueId", + "documentation":"The unique identifier of the queue associated to the limit.
", + "location":"uri", + "locationName":"queueId" + }, + "limitId":{ + "shape":"LimitId", + "documentation":"The unique identifier of the limit associated to the queue.
", + "location":"uri", + "locationName":"limitId" + }, + "status":{ + "shape":"UpdateQueueLimitAssociationStatus", + "documentation":"Sets the status of the limit. You can mark the limit active, or you can stop usage of the limit and either complete existing tasks or cancel any existing tasks immediately.
" + } + } + }, + "UpdateQueueLimitAssociationResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateQueueLimitAssociationStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "STOP_LIMIT_USAGE_AND_COMPLETE_TASKS", + "STOP_LIMIT_USAGE_AND_CANCEL_TASKS" + ] + }, "UpdateQueueRequest":{ "type":"structure", "required":[ @@ -11333,11 +12171,11 @@ "UpdateSessionRequest":{ "type":"structure", "required":[ + "targetLifecycleStatus", "farmId", "queueId", "jobId", - "sessionId", - "targetLifecycleStatus" + "sessionId" ], "members":{ "clientToken":{ @@ -11347,6 +12185,10 @@ "location":"header", "locationName":"X-Amz-Client-Token" }, + "targetLifecycleStatus":{ + "shape":"SessionLifecycleTargetStatus", + "documentation":"The life cycle status to update in the session.
" + }, "farmId":{ "shape":"FarmId", "documentation":"The farm ID to update in the session.
", @@ -11370,10 +12212,6 @@ "documentation":"The session ID to update.
", "location":"uri", "locationName":"sessionId" - }, - "targetLifecycleStatus":{ - "shape":"SessionLifecycleTargetStatus", - "documentation":"The life cycle status to update in the session.
" } } }, @@ -11385,13 +12223,17 @@ "UpdateStepRequest":{ "type":"structure", "required":[ + "targetTaskRunStatus", "farmId", "queueId", "jobId", - "stepId", - "targetTaskRunStatus" + "stepId" ], "members":{ + "targetTaskRunStatus":{ + "shape":"StepTargetTaskRunStatus", + "documentation":"The task status to update the step's tasks to.
" + }, "clientToken":{ "shape":"ClientToken", "documentation":"The unique token which the server uses to recognize retries of the same request.
", @@ -11422,10 +12264,6 @@ "documentation":"The step ID to update.
", "location":"uri", "locationName":"stepId" - }, - "targetTaskRunStatus":{ - "shape":"StepTargetTaskRunStatus", - "documentation":"The task status to update the step's tasks to.
" } } }, @@ -11486,12 +12324,12 @@ "UpdateTaskRequest":{ "type":"structure", "required":[ + "targetRunStatus", "farmId", "queueId", "jobId", "stepId", - "taskId", - "targetRunStatus" + "taskId" ], "members":{ "clientToken":{ @@ -11501,6 +12339,10 @@ "location":"header", "locationName":"X-Amz-Client-Token" }, + "targetRunStatus":{ + "shape":"TaskTargetRunStatus", + "documentation":"The run status with which to start the task.
" + }, "farmId":{ "shape":"FarmId", "documentation":"The farm ID to update.
", @@ -11530,10 +12372,6 @@ "documentation":"The task ID to update.
", "location":"uri", "locationName":"taskId" - }, - "targetRunStatus":{ - "shape":"TaskTargetRunStatus", - "documentation":"The run status with which to start the task.
" } } }, diff --git a/botocore/data/deadline/2023-10-12/waiters-2.json b/botocore/data/deadline/2023-10-12/waiters-2.json index 7cb6d90abe..eb6bcc6dfc 100644 --- a/botocore/data/deadline/2023-10-12/waiters-2.json +++ b/botocore/data/deadline/2023-10-12/waiters-2.json @@ -105,6 +105,18 @@ "expected" : "STOPPED" } ] }, + "QueueLimitAssociationStopped" : { + "description" : "Wait until a QueueLimitAssociation is stopped. Use this after setting the status to STOP_LIMIT_USAGE_AND_COMPLETE_TASKS or STOP_LIMIT_USAGE_AND_CANCEL_TASKS to wait for a QueueLimitAssociation to reach STOPPED", + "delay" : 10, + "maxAttempts" : 60, + "operation" : "GetQueueLimitAssociation", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "STOPPED" + } ] + }, "QueueScheduling" : { "delay" : 10, "maxAttempts" : 70, diff --git a/botocore/data/detective/2018-10-26/service-2.json b/botocore/data/detective/2018-10-26/service-2.json index 3bfa54668f..12ae8c2ced 100644 --- a/botocore/data/detective/2018-10-26/service-2.json +++ b/botocore/data/detective/2018-10-26/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"api.detective", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Detective", "serviceId":"Detective", "signatureVersion":"v4", "signingName":"detective", - "uid":"detective-2018-10-26" + "uid":"detective-2018-10-26", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptInvitation":{ @@ -446,7 +448,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ValidationException"} ], - "documentation":"Starts a data source packages for the behavior graph.
" + "documentation":"Starts a data source package for the Detective behavior graph.
" }, "UpdateInvestigationState":{ "name":"UpdateInvestigationState", @@ -1154,7 +1156,7 @@ "documentation":"Contains details about related finding groups.
" } }, - "documentation":"Details about the indicators of compromise which are used to determine if a resource is involved in a security incident. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.
" + "documentation":"Details about the indicators of compromise which are used to determine if a resource is involved in a security incident. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.
" }, "IndicatorType":{ "type":"string", @@ -1314,7 +1316,7 @@ }, "IndicatorType":{ "shape":"IndicatorType", - "documentation":"For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.
" + "documentation":"For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.
" }, "NextToken":{ "shape":"AiPaginationToken", @@ -2047,7 +2049,7 @@ }, "DatasourcePackages":{ "shape":"DatasourcePackageList", - "documentation":"The data source package start for the behavior graph.
" + "documentation":"The data source package to start for the behavior graph.
" } } }, @@ -2116,5 +2118,5 @@ "value":{"shape":"DatasourcePackageUsageInfo"} } }, - "documentation":"Detective uses machine learning and purpose-built visualizations to help you to analyze and investigate security issues across your Amazon Web Services (Amazon Web Services) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.
The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by an administrator account.
To add a member account to the behavior graph, the administrator account sends an invitation to the account. When the account accepts the invitation, it becomes a member account in the behavior graph.
Detective is also integrated with Organizations. The organization management account designates the Detective administrator account for the organization. That account becomes the administrator account for the organization behavior graph. The Detective administrator account is also the delegated administrator account for Detective in Organizations.
The Detective administrator account can enable any organization account as a member account in the organization behavior graph. The organization accounts do not receive invitations. The Detective administrator account can also invite other accounts to the organization behavior graph.
Every behavior graph is specific to a Region. You can only use the API to manage behavior graphs that belong to the Region that is associated with the currently selected endpoint.
The administrator account for a behavior graph can use the Detective API to do the following:
Enable and disable Detective. Enabling Detective creates a new behavior graph.
View the list of member accounts in a behavior graph.
Add member accounts to a behavior graph.
Remove member accounts from a behavior graph.
Apply tags to a behavior graph.
The organization management account can use the Detective API to select the delegated administrator for Detective.
The Detective administrator account for an organization can use the Detective API to do the following:
Perform all of the functions of an administrator account.
Determine whether to automatically enable new organization accounts as member accounts in the organization behavior graph.
An invited member account can use the Detective API to do the following:
View the list of behavior graphs that they are invited to.
Accept an invitation to contribute to a behavior graph.
Decline an invitation to contribute to a behavior graph.
Remove their account from a behavior graph.
All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.
We replaced the term \"master account\" with the term \"administrator account\". An administrator account is used to centrally manage multiple accounts. In the case of Detective, the administrator account manages the accounts in their behavior graph.
Detective uses machine learning and purpose-built visualizations to help you to analyze and investigate security issues across your Amazon Web Services (Amazon Web Services) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.
The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by an administrator account.
To add a member account to the behavior graph, the administrator account sends an invitation to the account. When the account accepts the invitation, it becomes a member account in the behavior graph.
Detective is also integrated with Organizations. The organization management account designates the Detective administrator account for the organization. That account becomes the administrator account for the organization behavior graph. The Detective administrator account is also the delegated administrator account for Detective in Organizations.
The Detective administrator account can enable any organization account as a member account in the organization behavior graph. The organization accounts do not receive invitations. The Detective administrator account can also invite other accounts to the organization behavior graph.
Every behavior graph is specific to a Region. You can only use the API to manage behavior graphs that belong to the Region that is associated with the currently selected endpoint.
The administrator account for a behavior graph can use the Detective API to do the following:
Enable and disable Detective. Enabling Detective creates a new behavior graph.
View the list of member accounts in a behavior graph.
Add member accounts to a behavior graph.
Remove member accounts from a behavior graph.
Apply tags to a behavior graph.
The organization management account can use the Detective API to select the delegated administrator for Detective.
The Detective administrator account for an organization can use the Detective API to do the following:
Perform all of the functions of an administrator account.
Determine whether to automatically enable new organization accounts as member accounts in the organization behavior graph.
An invited member account can use the Detective API to do the following:
View the list of behavior graphs that they are invited to.
Accept an invitation to contribute to a behavior graph.
Decline an invitation to contribute to a behavior graph.
Remove their account from a behavior graph.
All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.
We replaced the term \"master account\" with the term \"administrator account\". An administrator account is used to centrally manage multiple accounts. In the case of Detective, the administrator account manages the accounts in their behavior graph.
Cancels the specified Capacity Reservation, releases the reserved capacity, and changes the Capacity Reservation's state to cancelled
.
You can cancel a Capacity Reservation that is in the following states:
assessing
active
and there is no commitment duration or the commitment duration has elapsed. You can't cancel a future-dated Capacity Reservation during the commitment duration.
If a future-dated Capacity Reservation enters the delayed
state, the commitment duration is waived, and you can cancel it as soon as it enters the active
state.
Instances running in the reserved capacity continue running until you stop them. Stopped instances that target the Capacity Reservation can no longer launch. Modify these instances to either target a different Capacity Reservation, launch On-Demand Instance capacity, or run in any open Capacity Reservation that has matching attributes and sufficient capacity.
" + "documentation":"Cancels the specified Capacity Reservation, releases the reserved capacity, and changes the Capacity Reservation's state to cancelled
.
You can cancel a Capacity Reservation that is in the following states:
assessing
active
and there is no commitment duration or the commitment duration has elapsed. You can't cancel a future-dated Capacity Reservation during the commitment duration.
You can't modify or cancel a Capacity Block. For more information, see Capacity Blocks for ML.
If a future-dated Capacity Reservation enters the delayed
state, the commitment duration is waived, and you can cancel it as soon as it enters the active
state.
Instances running in the reserved capacity continue running until you stop them. Stopped instances that target the Capacity Reservation can no longer launch. Modify these instances to either target a different Capacity Reservation, launch On-Demand Instance capacity, or run in any open Capacity Reservation that has matching attributes and sufficient capacity.
" }, "CancelCapacityReservationFleets":{ "name":"CancelCapacityReservationFleets", @@ -2386,7 +2386,7 @@ }, "input":{"shape":"DescribeCapacityBlockOfferingsRequest"}, "output":{"shape":"DescribeCapacityBlockOfferingsResult"}, - "documentation":"Describes Capacity Block offerings available for purchase in the Amazon Web Services Region that you're currently using. With Capacity Blocks, you purchase a specific instance type for a period of time.
" + "documentation":"Describes Capacity Block offerings available for purchase in the Amazon Web Services Region that you're currently using. With Capacity Blocks, you purchase a specific instance type for a period of time.
To search for an available Capacity Block offering, you specify a reservation duration and instance count. You must select one of the following options.
For reservation durations, 1-day increments up to 14 days and 7-day increments up to 182 days total
For instance count 1, 2, 4, 8, 16, 32, or 64 instances
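For example, a hedged botocore search for a one-day, single-instance offering (the instance type and duration are illustrative):

    import botocore.session

    ec2 = botocore.session.get_session().create_client("ec2")
    offerings = ec2.describe_capacity_block_offerings(
        InstanceType="p5.48xlarge",   # illustrative accelerated instance type
        InstanceCount=1,              # one of 1, 2, 4, 8, 16, 32, or 64
        CapacityDurationHours=24,     # a 1-day reservation
    )
    for offering in offerings.get("CapacityBlockOfferings", []):
        print(offering)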
Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads.
Limitations
Supported zones
Availability Zone
Local Zone
Supported instance types
hpc6a.48xlarge
| hpc6id.32xlarge
| hpc7a.12xlarge
| hpc7a.24xlarge
| hpc7a.48xlarge
| hpc7a.96xlarge
| hpc7g.4xlarge
| hpc7g.8xlarge
| hpc7g.16xlarge
p3dn.24xlarge
| p4d.24xlarge
| p4de.24xlarge
| p5.48xlarge
| p5e.48xlarge
| p5en.48xlarge
trn1.2xlarge
| trn1.32xlarge
| trn1n.32xlarge
For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide.
" + "documentation":"Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads.
Limitations
Supported zones
Availability Zone
Local Zone
Supported instance types
hpc6a.48xlarge
| hpc6id.32xlarge
| hpc7a.12xlarge
| hpc7a.24xlarge
| hpc7a.48xlarge
| hpc7a.96xlarge
| hpc7g.4xlarge
| hpc7g.8xlarge
| hpc7g.16xlarge
p3dn.24xlarge
| p4d.24xlarge
| p4de.24xlarge
| p5.48xlarge
| p5e.48xlarge
| p5en.48xlarge
trn1.2xlarge
| trn1.32xlarge
| trn1n.32xlarge
| trn2.48xlarge
| trn2u.48xlarge
For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide.
" }, "DescribeInstanceTypeOfferings":{ "name":"DescribeInstanceTypeOfferings", @@ -7676,7 +7676,10 @@ }, "AllocationType":{ "type":"string", - "enum":["used"] + "enum":[ + "used", + "future" + ] }, "AllowedImagesSettingsDisabledState":{ "type":"string", @@ -12155,6 +12158,11 @@ "shape":"ClientLoginBannerResponseOptions", "documentation":"Options for enabling a customizable text banner that will be displayed on Amazon Web Services provided clients when a VPN session is established.
", "locationName":"clientLoginBannerOptions" + }, + "DisconnectOnSessionTimeout":{ + "shape":"Boolean", + "documentation":"Indicates whether the client VPN session is disconnected after the maximum sessionTimeoutHours
is reached. If true
, users are prompted to reconnect client VPN. If false
, client VPN attempts to reconnect automatically. The default value is false
.
Describes a Client VPN endpoint.
" @@ -13404,6 +13412,10 @@ "ClientLoginBannerOptions":{ "shape":"ClientLoginBannerOptions", "documentation":"Options for enabling a customizable text banner that will be displayed on Amazon Web Services provided clients when a VPN session is established.
" + }, + "DisconnectOnSessionTimeout":{ + "shape":"Boolean", + "documentation":"Indicates whether the client VPN session is disconnected after the maximum timeout specified in SessionTimeoutHours
is reached. If true
, users are prompted to reconnect client VPN. If false
, client VPN attempts to reconnect automatically. The default value is false
.
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.
" + "documentation":"Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.
For more information, see Ensuring idempotency.
", + "idempotencyToken":true }, "SpotOptions":{ "shape":"SpotOptionsRequest", @@ -30469,7 +30482,7 @@ }, "EventSubType":{ "shape":"String", - "documentation":"The event.
error
events:
iamFleetRoleInvalid
- The EC2 Fleet or Spot Fleet does not have the required permissions either to launch or terminate an instance.
allLaunchSpecsTemporarilyBlacklisted
- None of the configurations are valid, and several attempts to launch instances have failed. For more information, see the description of the event.
spotInstanceCountLimitExceeded
- You've reached the limit on the number of Spot Instances that you can launch.
spotFleetRequestConfigurationInvalid
- The configuration is not valid. For more information, see the description of the event.
fleetRequestChange
events:
active
- The EC2 Fleet or Spot Fleet request has been validated and Amazon EC2 is attempting to maintain the target number of running instances.
deleted
(EC2 Fleet) / cancelled
(Spot Fleet) - The EC2 Fleet is deleted or the Spot Fleet request is canceled and has no running instances. The EC2 Fleet or Spot Fleet will be deleted two days after its instances are terminated.
deleted_running
(EC2 Fleet) / cancelled_running
(Spot Fleet) - The EC2 Fleet is deleted or the Spot Fleet request is canceled and does not launch additional instances. Its existing instances continue to run until they are interrupted or terminated. The request remains in this state until all instances are interrupted or terminated.
deleted_terminating
(EC2 Fleet) / cancelled_terminating
(Spot Fleet) - The EC2 Fleet is deleted or the Spot Fleet request is canceled and its instances are terminating. The request remains in this state until all instances are terminated.
expired
- The EC2 Fleet or Spot Fleet request has expired. If the request was created with TerminateInstancesWithExpiration
set, a subsequent terminated
event indicates that the instances are terminated.
modify_in_progress
- The EC2 Fleet or Spot Fleet request is being modified. The request remains in this state until the modification is fully processed.
modify_succeeded
- The EC2 Fleet or Spot Fleet request was modified.
submitted
- The EC2 Fleet or Spot Fleet request is being evaluated and Amazon EC2 is preparing to launch the target number of instances.
progress
- The EC2 Fleet or Spot Fleet request is in the process of being fulfilled.
instanceChange
events:
launched
- A new instance was launched.
terminated
- An instance was terminated by the user.
termination_notified
- An instance termination notification was sent when a Spot Instance was terminated by Amazon EC2 during scale-down, when the target capacity of the fleet was modified down, for example, from a target capacity of 4 to a target capacity of 3.
Information
events:
fleetProgressHalted
- The price in every launch specification is not valid because it is below the Spot price (all the launch specifications have produced launchSpecUnusable
events). A launch specification might become valid if the Spot price changes.
launchSpecTemporarilyBlacklisted
- The configuration is not valid and several attempts to launch instances have failed. For more information, see the description of the event.
launchSpecUnusable
- The price in a launch specification is not valid because it is below the Spot price.
registerWithLoadBalancersFailed
- An attempt to register instances with load balancers failed. For more information, see the description of the event.
The event.
error
events:
iamFleetRoleInvalid
- The EC2 Fleet or Spot Fleet does not have the required permissions either to launch or terminate an instance.
allLaunchSpecsTemporarilyBlacklisted
- None of the configurations are valid, and several attempts to launch instances have failed. For more information, see the description of the event.
spotInstanceCountLimitExceeded
- You've reached the limit on the number of Spot Instances that you can launch.
spotFleetRequestConfigurationInvalid
- The configuration is not valid. For more information, see the description of the event.
fleetRequestChange
events:
active
- The EC2 Fleet or Spot Fleet request has been validated and Amazon EC2 is attempting to maintain the target number of running instances.
deleted
(EC2 Fleet) / cancelled
(Spot Fleet) - The EC2 Fleet is deleted or the Spot Fleet request is canceled and has no running instances. The EC2 Fleet or Spot Fleet will be deleted two days after its instances are terminated.
deleted_running
(EC2 Fleet) / cancelled_running
(Spot Fleet) - The EC2 Fleet is deleted or the Spot Fleet request is canceled and does not launch additional instances. Its existing instances continue to run until they are interrupted or terminated. The request remains in this state until all instances are interrupted or terminated.
deleted_terminating
(EC2 Fleet) / cancelled_terminating
(Spot Fleet) - The EC2 Fleet is deleted or the Spot Fleet request is canceled and its instances are terminating. The request remains in this state until all instances are terminated.
expired
- The EC2 Fleet or Spot Fleet request has expired. If the request was created with TerminateInstancesWithExpiration
set, a subsequent terminated
event indicates that the instances are terminated.
modify_in_progress
- The EC2 Fleet or Spot Fleet request is being modified. The request remains in this state until the modification is fully processed.
modify_succeeded
- The EC2 Fleet or Spot Fleet request was modified.
submitted
- The EC2 Fleet or Spot Fleet request is being evaluated and Amazon EC2 is preparing to launch the target number of instances.
progress
- The EC2 Fleet or Spot Fleet request is in the process of being fulfilled.
instanceChange
events:
launched
- A new instance was launched.
terminated
- An instance was terminated by the user.
termination_notified
- An instance termination notification was sent when a Spot Instance was terminated by Amazon EC2 during scale-down, when the target capacity of the fleet was modified down, for example, from a target capacity of 4 to a target capacity of 3.
Information
events:
fleetProgressHalted
- The price in every launch specification is not valid because it is below the Spot price (all the launch specifications have produced launchSpecUnusable
events). A launch specification might become valid if the Spot price changes.
launchSpecTemporarilyBlacklisted
- The configuration is not valid and several attempts to launch instances have failed. For more information, see the description of the event.
launchSpecUnusable
- The price specified in a launch specification is not valid because it is below the Spot price for the requested Spot pools.
Note: Even if a fleet with the maintain
request type is in the process of being canceled, it may still publish a launchSpecUnusable
event. This does not mean that the canceled fleet is attempting to launch a new instance.
registerWithLoadBalancersFailed
- An attempt to register instances with load balancers failed. For more information, see the description of the event.
suppress the specified device included in the block device mapping.
", + "documentation":"Suppresses the specified device included in the block device mapping.
", "locationName":"noDevice" } }, @@ -38310,7 +38323,7 @@ }, "AcceleratorTypes":{ "shape":"AcceleratorTypeSet", - "documentation":"The accelerator types that must be on the instance type.
For instance types with GPU accelerators, specify gpu
.
For instance types with FPGA accelerators, specify fpga
.
Default: Any accelerator type
", + "documentation":"The accelerator types that must be on the instance type.
For instance types with FPGA accelerators, specify fpga
.
For instance types with GPU accelerators, specify gpu
.
For instance types with Inference accelerators, specify inference
.
Default: Any accelerator type
", "locationName":"acceleratorTypeSet" }, "AcceleratorCount":{ @@ -38433,7 +38446,7 @@ }, "AcceleratorTypes":{ "shape":"AcceleratorTypeSet", - "documentation":"The accelerator types that must be on the instance type.
To include instance types with GPU hardware, specify gpu
.
To include instance types with FPGA hardware, specify fpga
.
Default: Any accelerator type
", + "documentation":"The accelerator types that must be on the instance type.
For instance types with FPGA accelerators, specify fpga
.
For instance types with GPU accelerators, specify gpu
.
For instance types with Inference accelerators, specify inference
.
Default: Any accelerator type
", "locationName":"AcceleratorType" }, "AcceleratorCount":{ @@ -39698,7 +39711,15 @@ "i8g.12xlarge", "i8g.16xlarge", "i8g.24xlarge", - "i8g.metal-24xl" + "i8g.metal-24xl", + "u7i-6tb.112xlarge", + "u7i-8tb.112xlarge", + "u7inh-32tb.480xlarge", + "p5e.48xlarge", + "p5en.48xlarge", + "f2.12xlarge", + "f2.48xlarge", + "trn2.48xlarge" ] }, "InstanceTypeHypervisor":{ @@ -44871,6 +44892,10 @@ "ClientLoginBannerOptions":{ "shape":"ClientLoginBannerOptions", "documentation":"Options for enabling a customizable text banner that will be displayed on Amazon Web Services provided clients when a VPN session is established.
" + }, + "DisconnectOnSessionTimeout":{ + "shape":"Boolean", + "documentation":"Indicates whether the client VPN session is disconnected after the maximum timeout specified in sessionTimeoutHours
is reached. If true
, users are prompted to reconnect client VPN. If false
, client VPN attempts to reconnect automatically. The default value is false
.
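A minimal sketch of enabling the new flag on an existing endpoint (the endpoint ID is a placeholder):

    import botocore.session

    ec2 = botocore.session.get_session().create_client("ec2")
    # Disconnect clients when sessionTimeoutHours elapses instead of reconnecting them.
    ec2.modify_client_vpn_endpoint(
        ClientVpnEndpointId="cvpn-endpoint-0123456789abcdef0",
        DisconnectOnSessionTimeout=True,
    )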
The maximum hourly price that you're willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
The maximum hourly price that you're willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
If you specify a maximum price, it must be more than USD $0.001. Specifying a value below USD $0.001 will result in an InvalidParameterValue
error message.
Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information see KernelCapabilities.
This parameter is not supported for Windows containers.
Linux-specific modifications that are applied to the default Docker container configuration, such as Linux kernel capabilities. For more information see KernelCapabilities.
This parameter is not supported for Windows containers.
Creates an Fargate profile for your Amazon EKS cluster. You must have at least one Fargate profile in a cluster to be able to run pods on Fargate.
The Fargate profile allows an administrator to declare which pods run on Fargate and specify which pods run on which Fargate profile. This declaration is done through the profile’s selectors. Each profile can have up to five selectors that contain a namespace and labels. A namespace is required for every selector. The label field consists of multiple optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is run on Fargate.
When you create a Fargate profile, you must specify a pod execution role to use with the pods that are scheduled with the profile. This role is added to the cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the kubelet
that is running on the Fargate infrastructure can register with your Amazon EKS cluster so that it can appear in your cluster as a node. The pod execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more information, see Pod Execution Role in the Amazon EKS User Guide.
Fargate profiles are immutable. However, you can create a new updated profile to replace an existing profile and then delete the original after the updated profile has finished creating.
If any Fargate profiles in a cluster are in the DELETING
status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster.
For more information, see Fargate profile in the Amazon EKS User Guide.
" + "documentation":"Creates an Fargate profile for your Amazon EKS cluster. You must have at least one Fargate profile in a cluster to be able to run pods on Fargate.
The Fargate profile allows an administrator to declare which pods run on Fargate and specify which pods run on which Fargate profile. This declaration is done through the profile's selectors. Each profile can have up to five selectors that contain a namespace and labels. A namespace is required for every selector. The label field consists of multiple optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is run on Fargate.
When you create a Fargate profile, you must specify a pod execution role to use with the pods that are scheduled with the profile. This role is added to the cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the kubelet
that is running on the Fargate infrastructure can register with your Amazon EKS cluster so that it can appear in your cluster as a node. The pod execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more information, see Pod Execution Role in the Amazon EKS User Guide.
Fargate profiles are immutable. However, you can create a new updated profile to replace an existing profile and then delete the original after the updated profile has finished creating.
If any Fargate profiles in a cluster are in the DELETING
status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster.
For more information, see Fargate profile in the Amazon EKS User Guide.
" }, "CreateNodegroup":{ "name":"CreateNodegroup", @@ -888,7 +888,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidRequestException"} ], - "documentation":"Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING
(this status transition is eventually consistent). When the update is complete (either Failed
or Successful
), the cluster status moves to Active
.
If your cluster has managed node groups attached to it, all of your node groups’ Kubernetes versions must match the cluster’s Kubernetes version in order to update the cluster to a new Kubernetes version.
" + "documentation":"Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING
(this status transition is eventually consistent). When the update is complete (either Failed
or Successful
), the cluster status moves to Active
.
If your cluster has managed node groups attached to it, all of your node groups' Kubernetes versions must match the cluster's Kubernetes version in order to update the cluster to a new Kubernetes version.
" }, "UpdateEksAnywhereSubscription":{ "name":"UpdateEksAnywhereSubscription", @@ -923,7 +923,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidRequestException"} ], - "documentation":"Updates an Amazon EKS managed node group configuration. Your node group continues to function during the update. The response output includes an update ID that you can use to track the status of your node group update with the DescribeUpdate API operation. Currently you can update the Kubernetes labels for a node group or the scaling configuration.
" + "documentation":"Updates an Amazon EKS managed node group configuration. Your node group continues to function during the update. The response output includes an update ID that you can use to track the status of your node group update with the DescribeUpdate API operation. You can update the Kubernetes labels and taints for a node group and the scaling and version update configuration.
" }, "UpdateNodegroupVersion":{ "name":"UpdateNodegroupVersion", @@ -1155,7 +1155,7 @@ }, "podIdentityAssociations":{ "shape":"StringList", - "documentation":"An array of Pod Identity Assocations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.
" + "documentation":"An array of Pod Identity Assocations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.
" } }, "documentation":"An Amazon EKS add-on. For more information, see Amazon EKS add-ons in the Amazon EKS User Guide.
" @@ -1169,10 +1169,10 @@ }, "compatibleVersions":{ "shape":"StringList", - "documentation":"A list of compatible add-on versions.
" + "documentation":"The list of compatible Amazon EKS add-on versions for the next Kubernetes version.
" } }, - "documentation":"Contains compatibility information for an Amazon EKS add-on.
" + "documentation":"The summary information about the Amazon EKS add-on compatibility for the next Kubernetes version for an insight check in the UPGRADE_READINESS
category.
The ARN of an IAM Role.
" } }, - "documentation":"A type of Pod Identity Association owned by an Amazon EKS Add-on.
Each EKS Pod Identity Association maps a role to a service account in a namespace in the cluster.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.
" + "documentation":"A type of Pod Identity Association owned by an Amazon EKS Add-on.
Each EKS Pod Identity Association maps a role to a service account in a namespace in the cluster.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.
" }, "AddonPodIdentityAssociationsList":{ "type":"list", @@ -1530,7 +1530,7 @@ "documentation":"Indicates if the block storage capability is enabled on your EKS Auto Mode cluster. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account.
" } }, - "documentation":"Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.
" + "documentation":"Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the Amazon EKS User Guide.
" }, "Boolean":{"type":"boolean"}, "BoxedBoolean":{ @@ -1711,7 +1711,7 @@ }, "upgradePolicy":{ "shape":"UpgradePolicyResponse", - "documentation":"This value indicates if extended support is enabled or disabled for the cluster.
Learn more about EKS Extended Support in the EKS User Guide.
" + "documentation":"This value indicates if extended support is enabled or disabled for the cluster.
Learn more about EKS Extended Support in the Amazon EKS User Guide.
" }, "zonalShiftConfig":{ "shape":"ZonalShiftConfigResponse", @@ -1723,11 +1723,11 @@ }, "computeConfig":{ "shape":"ComputeConfigResponse", - "documentation":"Indicates the current configuration of the compute capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account. For more information, see EKS Auto Mode compute capability in the EKS User Guide.
" + "documentation":"Indicates the current configuration of the compute capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account. For more information, see EKS Auto Mode compute capability in the Amazon EKS User Guide.
" }, "storageConfig":{ "shape":"StorageConfigResponse", - "documentation":"Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.
" + "documentation":"Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the Amazon EKS User Guide.
" } }, "documentation":"An object representing an Amazon EKS cluster.
" @@ -1890,14 +1890,14 @@ }, "nodePools":{ "shape":"StringList", - "documentation":"Configuration for node pools that defines the compute resources for your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the EKS User Guide.
" + "documentation":"Configuration for node pools that defines the compute resources for your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the Amazon EKS User Guide.
" }, "nodeRoleArn":{ "shape":"String", - "documentation":"The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster. This value cannot be changed after the compute capability of EKS Auto Mode is enabled. For more information, see the IAM Reference in the EKS User Guide.
" + "documentation":"The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster. This value cannot be changed after the compute capability of EKS Auto Mode is enabled. For more information, see the IAM Reference in the Amazon EKS User Guide.
" } }, - "documentation":"Request to update the configuration of the compute capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode compute capability in the EKS User Guide.
" + "documentation":"Request to update the configuration of the compute capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode compute capability in the Amazon EKS User Guide.
" }, "ComputeConfigResponse":{ "type":"structure", @@ -1908,7 +1908,7 @@ }, "nodePools":{ "shape":"StringList", - "documentation":"Indicates the current configuration of node pools in your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the EKS User Guide.
" + "documentation":"Indicates the current configuration of node pools in your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the Amazon EKS User Guide.
" }, "nodeRoleArn":{ "shape":"String", @@ -1983,7 +1983,7 @@ "documentation":"The name of the placement group for the Kubernetes control plane instances. This setting can't be changed after cluster creation.
" } }, - "documentation":"The placement configuration for all the control plane instances of your local Amazon EKS cluster on an Amazon Web Services Outpost. For more information, see Capacity considerations in the Amazon EKS User Guide.
" + "documentation":"The placement configuration for all the control plane instances of your local Amazon EKS cluster on an Amazon Web Services Outpost. For more information, see Capacity considerations in the Amazon EKS User Guide.
" }, "ControlPlanePlacementResponse":{ "type":"structure", @@ -2024,7 +2024,7 @@ }, "principalArn":{ "shape":"String", - "documentation":"The ARN of the IAM principal for the AccessEntry
. You can specify one ARN for each access entry. You can't specify the same ARN in more than one access entry. This value can't be changed after access entry creation.
The valid principals differ depending on the type of the access entry in the type
field. The only valid ARN is IAM roles for the types of access entries for nodes:
. You can use every IAM principal type for
STANDARD
access entries. You can't use the STS session principal type with access entries because this is a temporary principal for each session and not a permanent identity that can be assigned permissions.
IAM best practices recommend using IAM roles with temporary credentials, rather than IAM users with long-term credentials.
" + "documentation":"The ARN of the IAM principal for the AccessEntry
. You can specify one ARN for each access entry. You can't specify the same ARN in more than one access entry. This value can't be changed after access entry creation.
The valid principals differ depending on the type of the access entry in the type
field. For STANDARD
access entries, you can use every IAM principal type. For nodes (EC2
(for EKS Auto Mode), EC2_LINUX
, EC2_WINDOWS
, FARGATE_LINUX
, and HYBRID_LINUX
), the only valid ARN is IAM roles. You can't use the STS session principal type with access entries because this is a temporary principal for each session and not a permanent identity that can be assigned permissions.
IAM best practices recommend using IAM roles with temporary credentials, rather than IAM users with long-term credentials.
" }, "kubernetesGroups":{ "shape":"StringList", @@ -2045,7 +2045,7 @@ }, "type":{ "shape":"String", - "documentation":"The type of the new access entry. Valid values are Standard
, FARGATE_LINUX
, EC2_LINUX
, and EC2_WINDOWS
.
If the principalArn
is for an IAM role that's used for self-managed Amazon EC2 nodes, specify EC2_LINUX
or EC2_WINDOWS
. Amazon EKS grants the necessary permissions to the node for you. If the principalArn
is for any other purpose, specify STANDARD
. If you don't specify a value, Amazon EKS sets the value to STANDARD
. It's unnecessary to create access entries for IAM roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries in the aws-auth
ConfigMap
for the roles. You can't change this value once you've created the access entry.
If you set the value to EC2_LINUX
or EC2_WINDOWS
, you can't specify values for kubernetesGroups
, or associate an AccessPolicy
to the access entry.
The type of the new access entry. Valid values are STANDARD
, FARGATE_LINUX
, EC2_LINUX
, EC2_WINDOWS
, EC2
(for EKS Auto Mode), HYBRID_LINUX
, and HYPERPOD_LINUX
.
If the principalArn
is for an IAM role that's used for self-managed Amazon EC2 nodes, specify EC2_LINUX
or EC2_WINDOWS
. Amazon EKS grants the necessary permissions to the node for you. If the principalArn
is for any other purpose, specify STANDARD
. If you don't specify a value, Amazon EKS sets the value to STANDARD
. If you have the access mode of the cluster set to API_AND_CONFIG_MAP
, it's unnecessary to create access entries for IAM roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries in the aws-auth
ConfigMap
for the roles. You can't change this value once you've created the access entry.
If you set the value to EC2_LINUX
or EC2_WINDOWS
, you can't specify values for kubernetesGroups
, or associate an AccessPolicy
to the access entry.
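A short sketch of creating a node access entry of one of these types; the cluster name and role ARN are placeholders:

    import botocore.session

    eks = botocore.session.get_session().create_client("eks")
    eks.create_access_entry(
        clusterName="my-cluster",
        principalArn="arn:aws:iam::111122223333:role/self-managed-node-role",
        type="EC2_LINUX",   # EKS grants the node the permissions it needs
    )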
An array of Pod Identity Assocations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.
" + "documentation":"An array of Pod Identity Assocations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.
" } } }, @@ -2139,7 +2139,7 @@ }, "logging":{ "shape":"Logging", - "documentation":"Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.
Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs . By default, cluster control plane logs aren't exported to CloudWatch Logs . For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.
Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.
Zonal shift is a feature of Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary.
You can start a zonal shift for an EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your EKS cluster will automatically route traffic to targets in the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide .
" + "documentation":"Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.
Zonal shift is a feature of Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary.
You can start a zonal shift for an Amazon EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your Amazon EKS cluster will automatically route traffic to targets in the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide .
" }, "remoteNetworkConfig":{ "shape":"RemoteNetworkConfigRequest", @@ -3315,7 +3315,7 @@ "documentation":"Indicates if the load balancing capability is enabled on your EKS Auto Mode cluster. If the load balancing capability is enabled, EKS Auto Mode will create and delete load balancers in your Amazon Web Services account.
" } }, - "documentation":"Indicates the current configuration of the load balancing capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. For more information, see EKS Auto Mode load balancing capability in the EKS User Guide.
" + "documentation":"Indicates the current configuration of the load balancing capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. For more information, see EKS Auto Mode load balancing capability in the Amazon EKS User Guide.
" }, "EncryptionConfig":{ "type":"structure", @@ -3615,7 +3615,7 @@ }, "addonCompatibilityDetails":{ "shape":"AddonCompatibilityDetails", - "documentation":"A list of AddonCompatibilityDetail objects for Amazon EKS add-ons.
" + "documentation":"A list of AddonCompatibilityDetail
objects for Amazon EKS add-ons.
Summary information that relates to the category of the insight. Currently only returned with certain insights having category UPGRADE_READINESS
.
Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, ipv4 is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify ipv6, the VPC and subnets that you specify for cluster creation must have both IPv4 and IPv6 CIDR blocks assigned to them. You can't specify ipv6 for clusters in China Regions.
You can only specify ipv6 for 1.21 and later clusters that use version 1.10.1 or later of the Amazon VPC CNI add-on. If you specify ipv6, ensure that your VPC meets the requirements listed in the considerations in Assigning IPv6 addresses to pods and services in the Amazon EKS User Guide. Kubernetes assigns service IPv6 addresses from the unique local address range (fc00::/7). You can't specify a custom IPv6 CIDR block. Pod addresses are assigned from the subnet's IPv6 CIDR.
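To make the ipFamily constraint above concrete, a minimal boto3 sketch of requesting an IPv6 cluster at creation time; the cluster name, role ARN, and subnet IDs are placeholders.

import boto3

eks = boto3.client("eks")

# ipFamily can only be chosen at cluster creation and cannot be changed later.
eks.create_cluster(
    name="ipv6-cluster",                                              # hypothetical name
    roleArn="arn:aws:iam::111122223333:role/eksClusterRole",          # placeholder
    resourcesVpcConfig={"subnetIds": ["subnet-aaa", "subnet-bbb"]},   # subnets need IPv4 and IPv6 CIDRs
    kubernetesNetworkConfig={"ipFamily": "ipv6"},
)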
Request to enable or disable the load balancing capability on your EKS Auto Mode cluster. For more information, see EKS Auto Mode load balancing capability in the EKS User Guide.
" + "documentation":"Request to enable or disable the load balancing capability on your EKS Auto Mode cluster. For more information, see EKS Auto Mode load balancing capability in the Amazon EKS User Guide.
" } }, "documentation":"The Kubernetes network configuration for the cluster.
" @@ -4099,7 +4099,7 @@ "members":{ "clusters":{ "shape":"StringList", - "documentation":"A list of all of the clusters for your account in the specified Amazon Web Services Region.
" + "documentation":"A list of all of the clusters for your account in the specified Amazon Web Services Region .
" }, "nextToken":{ "shape":"String", @@ -4455,7 +4455,7 @@ }, "enabled":{ "shape":"BoxedBoolean", - "documentation":"If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs. If a log type isn't enabled, that log type doesn't export its control plane logs. Each individual log type can be enabled or disabled independently.
" + "documentation":"If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs . If a log type isn't enabled, that log type doesn't export its control plane logs. Each individual log type can be enabled or disabled independently.
" } }, "documentation":"An object representing the enabled or disabled Kubernetes control plane logs for your cluster.
" @@ -4719,9 +4719,20 @@ "maxUnavailablePercentage":{ "shape":"PercentCapacity", "documentation":"The maximum percentage of nodes unavailable during a version update. This percentage of nodes are updated in parallel, up to 100 nodes at once. This value or maxUnavailable
is required to have a value.
The configuration for the behavior to follow during a node group version update of this managed node group. You choose between two possible strategies for replacing nodes during an UpdateNodegroupVersion action.
An Amazon EKS managed node group updates by replacing nodes with new nodes of newer AMI versions in parallel. The update strategy changes the managed node update behavior of the managed node group for each quantity. The default strategy has guardrails to protect you from misconfiguration and launches the new instances first, before terminating the old instances. The minimal strategy removes the guardrails and terminates the old instances before launching the new instances. This minimal strategy is useful in scenarios where you are constrained to resources or costs (for example, with hardware accelerators such as GPUs).
" } }, - "documentation":"The node group update configuration.
" + "documentation":"The node group update configuration. An Amazon EKS managed node group updates by replacing nodes with new nodes of newer AMI versions in parallel. You choose the maximum unavailable and the update strategy.
" + }, + "NodegroupUpdateStrategies":{ + "type":"string", + "enum":[ + "DEFAULT", + "MINIMAL" + ] }, "NonZeroInteger":{ "type":"integer", @@ -5246,7 +5257,7 @@ "documentation":"Request to configure EBS Block Storage settings for your EKS Auto Mode cluster.
" } }, - "documentation":"Request to update the configuration of the storage capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.
" + "documentation":"Request to update the configuration of the storage capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode block storage capability in the Amazon EKS User Guide.
" }, "StorageConfigResponse":{ "type":"structure", @@ -5521,7 +5532,7 @@ }, "podIdentityAssociations":{ "shape":"AddonPodIdentityAssociationsList", - "documentation":"An array of Pod Identity Assocations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change. If an empty array is provided, existing Pod Identity Assocations owned by the Addon are deleted.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.
" + "documentation":"An array of Pod Identity Assocations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change. If an empty array is provided, existing Pod Identity Assocations owned by the Addon are deleted.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.
" } } }, @@ -5544,7 +5555,7 @@ "resourcesVpcConfig":{"shape":"VpcConfigRequest"}, "logging":{ "shape":"Logging", - "documentation":"Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the Amazon EKS User Guide .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.
Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs . By default, cluster control plane logs aren't exported to CloudWatch Logs . For more information, see Amazon EKS cluster control plane logs in the Amazon EKS User Guide .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.
If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support. Learn more about EKS Extended Support in the EKS User Guide.
" + "documentation":"If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support. Learn more about EKS Extended Support in the Amazon EKS User Guide.
" } }, - "documentation":"The support policy to use for the cluster. Extended support allows you to remain on specific Kubernetes versions for longer. Clusters in extended support have higher costs. The default value is EXTENDED. Use STANDARD to disable extended support. Learn more about EKS Extended Support in the EKS User Guide.
" + "documentation":"The support policy to use for the cluster. Extended support allows you to remain on specific Kubernetes versions for longer. Clusters in extended support have higher costs. The default value is EXTENDED. Use STANDARD to disable extended support. Learn more about EKS Extended Support in the Amazon EKS User Guide.
" }, "UpgradePolicyResponse":{ "type":"structure", "members":{ "supportType":{ "shape":"SupportType", - "documentation":"If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support. Learn more about EKS Extended Support in the EKS User Guide.
" + "documentation":"If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support. Learn more about EKS Extended Support in the Amazon EKS User Guide.
" } }, - "documentation":"This value indicates if extended support is enabled or disabled for the cluster. Learn more about EKS Extended Support in the EKS User Guide.
" + "documentation":"This value indicates if extended support is enabled or disabled for the cluster. Learn more about EKS Extended Support in the Amazon EKS User Guide.
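A short boto3 sketch of switching the support policy described above; the cluster name is hypothetical.

import boto3

eks = boto3.client("eks")

# Opt a cluster out of extended support so it is automatically upgraded
# at the end of standard support.
eks.update_cluster_config(
    name="my-cluster",  # hypothetical
    upgradePolicy={"supportType": "STANDARD"},
)

# The current setting is returned on DescribeCluster.
support = eks.describe_cluster(name="my-cluster")["cluster"]["upgradePolicy"]["supportType"]
print(support)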
" }, "VpcConfigRequest":{ "type":"structure", diff --git a/botocore/data/emr-serverless/2021-07-13/service-2.json b/botocore/data/emr-serverless/2021-07-13/service-2.json index 86100c0cb3..d32f0d655d 100644 --- a/botocore/data/emr-serverless/2021-07-13/service-2.json +++ b/botocore/data/emr-serverless/2021-07-13/service-2.json @@ -822,7 +822,7 @@ }, "EntryPointPath":{ "type":"string", - "max":256, + "max":4096, "min":1, "pattern":".*\\S.*", "sensitive":true diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 29b28781f8..0383ba8de8 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -101,6 +101,9 @@ "me-south-1" : { "description" : "Middle East (Bahrain)" }, + "mx-central-1" : { + "description" : "Mexico (Central)" + }, "sa-east-1" : { "description" : "South America (Sao Paulo)" }, @@ -198,6 +201,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -289,6 +293,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -563,6 +568,7 @@ }, "aoss" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -571,6 +577,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1032,6 +1039,16 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "credentialScope" : { + "region" : "mx-central-1" + }, + "hostname" : "api.ecr.mx-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -1689,6 +1706,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -1758,6 +1776,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1793,6 +1812,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1899,6 +1919,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2415,6 +2436,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2831,6 +2853,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -2901,6 +2924,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -2914,6 +2938,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3008,6 +3033,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -3979,6 +4005,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -4287,6 +4314,7 @@ "il-central-1" : { }, "me-central-1" : { }, 
"me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -4571,6 +4599,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -5592,6 +5621,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -6418,6 +6448,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "datasync.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "datasync.sa-east-1.api.aws", @@ -6767,6 +6803,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -6969,6 +7006,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "dlm.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "dlm.sa-east-1.api.aws", @@ -7058,6 +7101,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -7449,6 +7493,7 @@ }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -7584,6 +7629,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -7767,6 +7813,7 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { }, "sa-east-1" : { "variants" : [ { "hostname" : "ec2.sa-east-1.api.aws", @@ -7867,6 +7914,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -7967,6 +8015,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -8082,6 +8131,9 @@ "me-south-1" : { "hostname" : "eks-auth.me-south-1.api.aws" }, + "mx-central-1" : { + "hostname" : "eks-auth.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "eks-auth.sa-east-1.api.aws" }, @@ -8134,6 +8186,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -8457,6 +8510,12 @@ "tags" : [ "fips" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-southeast-7.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.ca-central-1.amazonaws.com", @@ -8601,6 +8660,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.ap-southeast-5.amazonaws.com" }, + "fips-ap-southeast-7" : { + "credentialScope" : { + "region" : "ap-southeast-7" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-southeast-7.amazonaws.com" + }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -8692,6 +8758,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.me-south-1.amazonaws.com" }, + "fips-mx-central-1" : { + "credentialScope" : { + "region" : "mx-central-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.mx-central-1.amazonaws.com" + }, "fips-sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -8745,6 +8818,12 @@ "tags" : [ "fips" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : 
"elasticfilesystem-fips.mx-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.sa-east-1.amazonaws.com", @@ -8836,6 +8915,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -8949,6 +9029,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "sslCommonName" : "{service}.{region}.{dnsSuffix}", @@ -9432,6 +9513,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "aos.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "aos.sa-east-1.api.aws", @@ -9560,6 +9647,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -11046,6 +11134,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -11232,6 +11321,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -11650,6 +11740,9 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "hostname" : "internetmonitor.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "internetmonitor.sa-east-1.api.aws", "variants" : [ { @@ -12670,6 +12763,9 @@ "me-south-1" : { "hostname" : "kendra-ranking.me-south-1.api.aws" }, + "mx-central-1" : { + "hostname" : "kendra-ranking.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "kendra-ranking.sa-east-1.api.aws" }, @@ -12755,6 +12851,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -13255,6 +13352,19 @@ "deprecated" : true, "hostname" : "kms-fips.me-south-1.amazonaws.com" }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "kms-fips.mx-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "mx-central-1-fips" : { + "credentialScope" : { + "region" : "mx-central-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.mx-central-1.amazonaws.com" + }, "sa-east-1" : { "variants" : [ { "hostname" : "kms-fips.sa-east-1.amazonaws.com", @@ -13744,6 +13854,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "lambda.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "lambda.sa-east-1.api.aws", @@ -13801,6 +13917,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -14256,6 +14373,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "logs.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "logs.sa-east-1.api.aws", @@ -15479,6 +15602,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -15914,6 +16038,9 @@ "me-south-1" : { "hostname" : "notifications.me-south-1.api.aws" }, + "mx-central-1" : { + "hostname" : "notifications.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "notifications.sa-east-1.api.aws" }, @@ -15971,6 +16098,7 @@ "il-central-1" : { }, 
"me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -16672,6 +16800,13 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "protocols" : [ "https" ], "variants" : [ { @@ -17362,6 +17497,9 @@ "me-south-1" : { "hostname" : "qbusiness.me-south-1.api.aws" }, + "mx-central-1" : { + "hostname" : "qbusiness.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "qbusiness.sa-east-1.api.aws" }, @@ -17546,6 +17684,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -17780,6 +17919,7 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { }, "sa-east-1" : { "variants" : [ { "hostname" : "rbin.sa-east-1.api.aws", @@ -17888,6 +18028,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "rds-fips.ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -18192,6 +18333,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -18471,26 +18613,126 @@ }, "resiliencehub" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "af-south-1" : { + "variants" : [ { + "hostname" : "resiliencehub.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "resiliencehub.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : 
"resiliencehub.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "resiliencehub.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "resiliencehub.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "resiliencehub.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "resiliencehub.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "resiliencehub.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "resiliencehub.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + } } }, "resource-explorer-2" : { @@ -18672,6 +18914,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -18723,6 +18966,8 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -18764,6 +19009,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -18884,6 +19130,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -19356,6 +19603,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.mx-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "s3-external-1" : { "credentialScope" : { "region" : "us-east-1" @@ -20244,6 +20497,11 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -21065,6 +21323,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "servicediscovery.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "servicediscovery.sa-east-1.api.aws", @@ -21180,6 +21444,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -21604,77 +21869,212 @@ }, "snowball" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "snowball-fips.af-south-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.af-south-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "snowball-fips.ap-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ap-northeast-1" : { "variants" : [ { "hostname" : "snowball-fips.ap-northeast-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-northeast-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-northeast-2" : { "variants" : [ { "hostname" : 
"snowball-fips.ap-northeast-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-northeast-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-northeast-3" : { "variants" : [ { "hostname" : "snowball-fips.ap-northeast-3.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-northeast-3.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-south-1" : { "variants" : [ { "hostname" : "snowball-fips.ap-south-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-south-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-south-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-southeast-1" : { "variants" : [ { "hostname" : "snowball-fips.ap-southeast-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-southeast-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-southeast-2" : { "variants" : [ { "hostname" : "snowball-fips.ap-southeast-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-southeast-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] } ] }, - "ap-southeast-3" : { }, - "ca-central-1" : { + "ap-southeast-3" : { "variants" : [ { - "hostname" : "snowball-fips.ca-central-1.amazonaws.com", + "hostname" : "snowball-fips.ap-southeast-3.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-southeast-3.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { + "ca-central-1" : { + "variants" : [ { + "hostname" : "snowball-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { "variants" : [ { "hostname" : "snowball-fips.eu-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "snowball-fips.eu-north-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-north-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "snowball-fips.eu-south-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-south-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-south-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-north-1" : { }, - "eu-south-1" : { }, "eu-west-1" : { "variants" : [ { "hostname" : "snowball-fips.eu-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "eu-west-2" : { "variants" : [ { "hostname" : "snowball-fips.eu-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + 
"hostname" : "snowball-fips.eu-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-west-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "eu-west-3" : { "variants" : [ { "hostname" : "snowball-fips.eu-west-3.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-west-3.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, + "fips-af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.af-south-1.amazonaws.com" + }, + "fips-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-east-1.amazonaws.com" + }, "fips-ap-northeast-1" : { "credentialScope" : { "region" : "ap-northeast-1" @@ -21717,6 +22117,13 @@ "deprecated" : true, "hostname" : "snowball-fips.ap-southeast-2.amazonaws.com" }, + "fips-ap-southeast-3" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-southeast-3.amazonaws.com" + }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -21731,6 +22138,20 @@ "deprecated" : true, "hostname" : "snowball-fips.eu-central-1.amazonaws.com" }, + "fips-eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.eu-north-1.amazonaws.com" + }, + "fips-eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.eu-south-1.amazonaws.com" + }, "fips-eu-west-1" : { "credentialScope" : { "region" : "eu-west-1" @@ -21752,6 +22173,20 @@ "deprecated" : true, "hostname" : "snowball-fips.eu-west-3.amazonaws.com" }, + "fips-il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.il-central-1.amazonaws.com" + }, + "fips-me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.me-central-1.amazonaws.com" + }, "fips-sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -21787,36 +22222,88 @@ "deprecated" : true, "hostname" : "snowball-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "snowball-fips.il-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.il-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "snowball-fips.me-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.me-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "snowball-fips.sa-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.sa-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.sa-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-1" : { "variants" : [ { "hostname" : "snowball-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, 
"us-east-2" : { "variants" : [ { "hostname" : "snowball-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "snowball-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "snowball-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -21892,6 +22379,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -21938,8 +22426,18 @@ "ap-southeast-4" : { }, "ap-southeast-5" : { }, "ap-southeast-7" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "sqs-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "sqs-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -21948,6 +22446,20 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "sqs-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "sqs-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -21979,6 +22491,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "sslCommonName" : "queue.{dnsSuffix}", @@ -22087,6 +22600,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -22668,6 +23182,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -22842,6 +23357,7 @@ }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -22883,6 +23399,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -23037,6 +23554,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -23066,39 +23584,156 @@ }, "synthetics" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - "ap-southeast-7" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "synthetics.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "synthetics.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + 
} ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "synthetics.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "synthetics.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "synthetics.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "synthetics.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "synthetics.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "synthetics-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "synthetics.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { "hostname" : "synthetics-fips.ca-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "synthetics.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "synthetics.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "synthetics.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "synthetics.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "synthetics.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "synthetics.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "synthetics.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "synthetics.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "synthetics.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -23141,32 +23776,82 @@ "deprecated" : true, "hostname" : "synthetics-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : 
"synthetics.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "synthetics.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "synthetics.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "synthetics.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "synthetics.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "synthetics-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "synthetics.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "synthetics-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "synthetics.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "synthetics-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "synthetics.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "synthetics-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "synthetics.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -23199,6 +23884,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -25265,6 +25951,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -26827,8 +27514,18 @@ }, "synthetics" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "synthetics.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "synthetics.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "tagging" : { @@ -30838,12 +31535,24 @@ "variants" : [ { "hostname" : "resiliencehub-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "resiliencehub-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "resiliencehub.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "resiliencehub-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "resiliencehub-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "resiliencehub.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -32418,6 +33127,24 @@ "us-iso-east-1" : { } } }, + "budgets" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "budgets.c2s.ic.gov" + }, + "us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "budgets.c2s.ic.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-global" + }, 
"cloudcontrolapi" : { "endpoints" : { "us-iso-east-1" : { }, diff --git a/botocore/data/firehose/2015-08-04/service-2.json b/botocore/data/firehose/2015-08-04/service-2.json index cca217dd08..9d6974f03d 100644 --- a/botocore/data/firehose/2015-08-04/service-2.json +++ b/botocore/data/firehose/2015-08-04/service-2.json @@ -648,7 +648,7 @@ }, "WarehouseLocation":{ "shape":"WarehouseLocation", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The warehouse location for Apache Iceberg tables. You must configure this when schema evolution and table creation is enabled.
Amazon Data Firehose is in preview release and is subject to change.
" } }, "documentation":"Describes the containers where the destination Apache Iceberg Tables are persisted.
" @@ -754,6 +754,10 @@ "shape":"DeliveryStreamType", "documentation":"The Firehose stream type. This parameter can be one of the following values:
DirectPut: Provider applications access the Firehose stream directly.
KinesisStreamAsSource: The Firehose stream uses a Kinesis data stream as a source.
The structure that configures parameters such as ThroughputHintInMBs for a stream configured with Direct PUT as a source.
When a Kinesis data stream is used as the source for the Firehose stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.
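A minimal boto3 sketch of a Direct PUT stream using the throughput hint documented above; the stream name, role ARN, and bucket ARN are placeholders, and DirectPutSourceConfiguration is the new member this change introduces.

import boto3

firehose = boto3.client("firehose")

firehose.create_delivery_stream(
    DeliveryStreamName="direct-put-stream",   # hypothetical
    DeliveryStreamType="DirectPut",
    DirectPutSourceConfiguration={
        "ThroughputHintInMBs": 10,            # hint for the expected ingest rate
    },
    ExtendedS3DestinationConfiguration={
        "RoleARN": "arn:aws:iam::111122223333:role/firehose-role",  # placeholder
        "BucketARN": "arn:aws:s3:::my-destination-bucket",          # placeholder
    },
)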
" @@ -777,7 +781,7 @@ }, "ElasticsearchDestinationConfiguration":{ "shape":"ElasticsearchDestinationConfiguration", - "documentation":"The destination in Amazon ES. You can specify only one destination.
" + "documentation":"The destination in Amazon OpenSearch Service. You can specify only one destination.
" }, "AmazonopensearchserviceDestinationConfiguration":{ "shape":"AmazonopensearchserviceDestinationConfiguration", @@ -793,7 +797,7 @@ }, "Tags":{ "shape":"TagDeliveryStreamInputTagList", - "documentation":"A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.
You can specify up to 50 tags when creating a Firehose stream.
If you specify tags in the CreateDeliveryStream action, Amazon Data Firehose performs an additional authorization on the firehose:TagDeliveryStream action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose Firehose streams with IAM resource tags will fail with an AccessDeniedException such as following.
AccessDeniedException User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.
For an example IAM policy, see Tag example.
" + "documentation":"A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.
You can specify up to 50 tags when creating a Firehose stream.
If you specify tags in the CreateDeliveryStream action, Amazon Data Firehose performs an additional authorization on the firehose:TagDeliveryStream action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose streams with IAM resource tags will fail with an AccessDeniedException such as following.
AccessDeniedException User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.
For an example IAM policy, see Tag example.
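To illustrate the tagging authorization described above, a hedged boto3 sketch that tags a stream at creation and surfaces the AccessDeniedException a caller sees without firehose:TagDeliveryStream permission; all names and ARNs are placeholders.

import boto3
from botocore.exceptions import ClientError

firehose = boto3.client("firehose")

try:
    firehose.create_delivery_stream(
        DeliveryStreamName="tagged-stream",   # hypothetical
        DeliveryStreamType="DirectPut",
        ExtendedS3DestinationConfiguration={
            "RoleARN": "arn:aws:iam::111122223333:role/firehose-role",  # placeholder
            "BucketARN": "arn:aws:s3:::my-destination-bucket",          # placeholder
        },
        Tags=[{"Key": "team", "Value": "analytics"}],
    )
except ClientError as err:
    # Without firehose:TagDeliveryStream permission the call fails with AccessDeniedException.
    if err.response["Error"]["Code"] == "AccessDeniedException":
        raise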
" }, "AmazonOpenSearchServerlessDestinationConfiguration":{ "shape":"AmazonOpenSearchServerlessDestinationConfiguration", @@ -810,7 +814,7 @@ }, "DatabaseSourceConfiguration":{ "shape":"DatabaseSourceConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The top level object for configuring streams with database as a source.
Amazon Data Firehose is in preview release and is subject to change.
" } } }, @@ -877,14 +881,14 @@ "members":{ "Include":{ "shape":"DatabaseColumnIncludeOrExcludeList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of column patterns in source database to be included for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Exclude":{ "shape":"DatabaseColumnIncludeOrExcludeList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of column patterns in source database to be excluded for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The structure used to configure the list of column patterns in source database endpoint for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseColumnName":{ "type":"string", @@ -907,14 +911,14 @@ "members":{ "Include":{ "shape":"DatabaseIncludeOrExcludeList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of database patterns in source database endpoint to be included for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Exclude":{ "shape":"DatabaseIncludeOrExcludeList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of database patterns in source database endpoint to be excluded for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The structure used to configure the list of database patterns in source database endpoint for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseName":{ "type":"string", @@ -939,27 +943,27 @@ "members":{ "Id":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The identifier of the current snapshot of the table in source database endpoint.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Table":{ "shape":"DatabaseTableName", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The fully qualified name of the table in source database endpoint that Firehose reads.
Amazon Data Firehose is in preview release and is subject to change.
" }, "RequestTimestamp":{ "shape":"Timestamp", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The timestamp when the current snapshot is taken on the table.
Amazon Data Firehose is in preview release and is subject to change.
" }, "RequestedBy":{ "shape":"SnapshotRequestedBy", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The principal that sent the request to take the current snapshot on the table.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Status":{ "shape":"SnapshotStatus", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The status of the current snapshot of the table.
Amazon Data Firehose is in preview release and is subject to change.
" }, "FailureDescription":{"shape":"FailureDescription"} }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The structure that describes the snapshot information of a table in source database endpoint that Firehose reads.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseSnapshotInfoList":{ "type":"list", @@ -971,7 +975,7 @@ "members":{ "SecretsManagerConfiguration":{"shape":"SecretsManagerConfiguration"} }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The structure to configure the authentication methods for Firehose to connect to source database endpoint.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseSourceConfiguration":{ "type":"structure", @@ -988,104 +992,104 @@ "members":{ "Type":{ "shape":"DatabaseType", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The type of database engine. This can be one of the following values.
MySQL
PostgreSQL
Amazon Data Firehose is in preview release and is subject to change.
" }, "Endpoint":{ "shape":"DatabaseEndpoint", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The endpoint of the database server.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Port":{ "shape":"DatabasePort", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The port of the database. This can be one of the following values.
3306 for MySQL database type
5432 for PostgreSQL database type
Amazon Data Firehose is in preview release and is subject to change.
" }, "SSLMode":{ "shape":"SSLMode", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The mode to enable or disable SSL when Firehose connects to the database endpoint.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Databases":{ "shape":"DatabaseList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of database patterns in source database endpoint for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Tables":{ "shape":"DatabaseTableList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of table patterns in source database endpoint for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Columns":{ "shape":"DatabaseColumnList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of column patterns in source database endpoint for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "SurrogateKeys":{ "shape":"DatabaseSurrogateKeyList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The optional list of table and column names used as unique key columns when taking snapshot if the tables don’t have primary keys configured.
Amazon Data Firehose is in preview release and is subject to change.
" }, "SnapshotWatermarkTable":{ "shape":"DatabaseTableName", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The fully qualified name of the table in source database endpoint that Firehose uses to track snapshot progress.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseSourceAuthenticationConfiguration":{ "shape":"DatabaseSourceAuthenticationConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The structure to configure the authentication methods for Firehose to connect to source database endpoint.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseSourceVPCConfiguration":{ "shape":"DatabaseSourceVPCConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The details of the VPC Endpoint Service which Firehose uses to create a PrivateLink to the database.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The top level object for configuring streams with database as a source.
Amazon Data Firehose is in preview release and is subject to change.
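Because the database source feature is in preview, the following is only a speculative sketch of how the members documented above might be populated; every value, ARN, and the VPC endpoint service name are placeholders and the shapes may change.

import boto3

firehose = boto3.client("firehose")

# Speculative sketch only: member names are taken from the model shown above;
# all values below are placeholders.
database_source = {
    "Type": "MySQL",
    "Endpoint": "mydb.cluster-xyz.us-east-1.rds.amazonaws.com",
    "Port": 3306,
    "SSLMode": "Enabled",
    "Databases": {"Include": ["orders"]},
    "Tables": {"Include": ["orders.public_orders"]},
    "SnapshotWatermarkTable": "orders.firehose_watermark",
    "DatabaseSourceAuthenticationConfiguration": {
        "SecretsManagerConfiguration": {
            "SecretARN": "arn:aws:secretsmanager:us-east-1:111122223333:secret:db-creds",  # placeholder
            "Enabled": True,
        }
    },
    "DatabaseSourceVPCConfiguration": {
        "VpcEndpointServiceName": "com.amazonaws.vpce.us-east-1.vpce-svc-0123456789abcdef0"  # placeholder
    },
}

# This dict would be passed as DatabaseSourceConfiguration to create_delivery_stream
# together with an Apache Iceberg destination configuration (not shown).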
" }, "DatabaseSourceDescription":{ "type":"structure", "members":{ "Type":{ "shape":"DatabaseType", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The type of database engine. This can be one of the following values.
MySQL
PostgreSQL
Amazon Data Firehose is in preview release and is subject to change.
" }, "Endpoint":{ "shape":"DatabaseEndpoint", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The endpoint of the database server.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Port":{ "shape":"DatabasePort", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The port of the database. This can be one of the following values.
3306 for MySQL database type
5432 for PostgreSQL database type
Amazon Data Firehose is in preview release and is subject to change.
" }, "SSLMode":{ "shape":"SSLMode", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The mode to enable or disable SSL when Firehose connects to the database endpoint.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Databases":{ "shape":"DatabaseList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of database patterns in source database endpoint for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Tables":{ "shape":"DatabaseTableList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of table patterns in source database endpoint for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Columns":{ "shape":"DatabaseColumnList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of column patterns in source database endpoint for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "SurrogateKeys":{ "shape":"DatabaseColumnIncludeOrExcludeList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The optional list of table and column names used as unique key columns when taking snapshot if the tables don’t have primary keys configured.
Amazon Data Firehose is in preview release and is subject to change.
" }, "SnapshotWatermarkTable":{ "shape":"DatabaseTableName", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The fully qualified name of the table in source database endpoint that Firehose uses to track snapshot progress.
Amazon Data Firehose is in preview release and is subject to change.
" }, "SnapshotInfo":{ "shape":"DatabaseSnapshotInfoList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The structure that describes the snapshot information of a table in source database endpoint that Firehose reads.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseSourceAuthenticationConfiguration":{ "shape":"DatabaseSourceAuthenticationConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The structure to configure the authentication methods for Firehose to connect to source database endpoint.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseSourceVPCConfiguration":{ "shape":"DatabaseSourceVPCConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The details of the VPC Endpoint Service which Firehose uses to create a PrivateLink to the database.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The top level object for database source description.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseSourceVPCConfiguration":{ "type":"structure", @@ -1093,10 +1097,10 @@ "members":{ "VpcEndpointServiceName":{ "shape":"VpcEndpointServiceName", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":" The VPC endpoint service name which Firehose uses to create a PrivateLink to the database. The endpoint service must have the Firehose service principle firehose.amazonaws.com
as an allowed principal on the VPC endpoint service. The VPC endpoint service name is a string that looks like com.amazonaws.vpce.<region>.<vpc-endpoint-service-id>
.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The structure for details of the VPC Endpoint Service which Firehose uses to create a PrivateLink to the database.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseSurrogateKeyList":{ "type":"list", @@ -1111,14 +1115,14 @@ "members":{ "Include":{ "shape":"DatabaseTableIncludeOrExcludeList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of table patterns in source database endpoint to be included for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Exclude":{ "shape":"DatabaseTableIncludeOrExcludeList", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The list of table patterns in source database endpoint to be excluded for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The structure used to configure the list of table patterns in source database endpoint for Firehose to read from.
Amazon Data Firehose is in preview release and is subject to change.
" }, "DatabaseTableName":{ "type":"string", @@ -1403,7 +1407,7 @@ }, "ElasticsearchDestinationDescription":{ "shape":"ElasticsearchDestinationDescription", - "documentation":"The destination in Amazon ES.
" + "documentation":"The destination in Amazon OpenSearch Service.
" }, "AmazonopensearchserviceDestinationDescription":{ "shape":"AmazonopensearchserviceDestinationDescription", @@ -1463,7 +1467,7 @@ }, "PartitionSpec":{ "shape":"PartitionSpec", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The partition spec configuration for a table that is used by automatic table creation.
Amazon Data Firehose is in preview release and is subject to change.
" }, "S3ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", @@ -1476,6 +1480,27 @@ "type":"list", "member":{"shape":"DestinationTableConfiguration"} }, + "DirectPutSourceConfiguration":{ + "type":"structure", + "required":["ThroughputHintInMBs"], + "members":{ + "ThroughputHintInMBs":{ + "shape":"ThroughputHintInMBs", + "documentation":"The value that you configure for this parameter is for information purpose only and does not affect Firehose delivery throughput limit. You can use the Firehose Limits form to request a throughput limit increase.
" + } + }, + "documentation":"The structure that configures parameters such as ThroughputHintInMBs
for a stream configured with Direct PUT as a source.
The value that you configure for this parameter is for informational purposes only and does not affect the Firehose delivery throughput limit. You can use the Firehose Limits form to request a throughput limit increase.
" + } + }, + "documentation":"The structure that configures parameters such as ThroughputHintInMBs
for a stream configured with Direct PUT as a source.
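Because ThroughputHintInMBs is described above only in prose, here is a minimal, hedged sketch of a Direct PUT stream that declares it; it assumes DirectPutSourceConfiguration is accepted by CreateDeliveryStream as modeled here, and the stream name, role ARN, and bucket ARN are placeholders.

```python
import boto3

firehose = boto3.client("firehose")

firehose.create_delivery_stream(
    DeliveryStreamName="example-direct-put-stream",
    DeliveryStreamType="DirectPut",
    # Informational hint only (1-100); it does not change the actual throughput
    # limit, which is raised through the Firehose Limits form.
    DirectPutSourceConfiguration={"ThroughputHintInMBs": 10},
    ExtendedS3DestinationConfiguration={
        "RoleARN": "arn:aws:iam::123456789012:role/firehose-delivery-role",
        "BucketARN": "arn:aws:s3:::example-destination-bucket",
    },
)
```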
Specifies that the dynamic partitioning is enabled for this Firehose Firehose stream.
" + "documentation":"Specifies that the dynamic partitioning is enabled for this Firehose stream.
" } }, "documentation":"The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.
" @@ -1513,7 +1538,7 @@ "documentation":"Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.
" } }, - "documentation":"Describes the buffering to perform before delivering data to the Amazon ES destination.
" + "documentation":"Describes the buffering to perform before delivering data to the Amazon OpenSearch Service destination.
" }, "ElasticsearchBufferingIntervalInSeconds":{ "type":"integer", @@ -1541,11 +1566,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeDomain
, DescribeDomains
, and DescribeDomainConfig
after assuming the role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Specify either ClusterEndpoint
or DomainARN
.
The ARN of the Amazon OpenSearch Service domain. The IAM role must have permissions for DescribeDomain
, DescribeDomains
, and DescribeDomainConfig
after assuming the role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Specify either ClusterEndpoint
or DomainARN
.
The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName
to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. The default value is OneDay
.
The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName
to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon OpenSearch Service Destination. The default value is OneDay
.
The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
" + "documentation":"The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
" }, "S3BackupMode":{ "shape":"ElasticsearchS3BackupMode", - "documentation":"Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly
, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/
appended to the key prefix. When set to AllDocuments
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/
appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly
.
You can't change this backup mode after you create the Firehose stream.
" + "documentation":"Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly
, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/
appended to the key prefix. When set to AllDocuments
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/
appended to the prefix. For more information, see Amazon S3 Backup for the Amazon OpenSearch Service Destination. Default value is FailedDocumentsOnly
.
You can't change this backup mode after you create the Firehose stream.
" }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -1596,7 +1621,7 @@ "documentation":"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
" } }, - "documentation":"Describes the configuration of a destination in Amazon ES.
" + "documentation":"Describes the configuration of a destination in Amazon OpenSearch Service.
" }, "ElasticsearchDestinationDescription":{ "type":"structure", @@ -1607,11 +1632,11 @@ }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Firehose uses either ClusterEndpoint
or DomainARN
to send data to Amazon ES.
The ARN of the Amazon OpenSearch Service domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Firehose uses either ClusterEndpoint
or DomainARN
to send data to Amazon OpenSearch Service.
The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint
or the DomainARN
field to send data to Amazon ES.
The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint
or the DomainARN
field to send data to Amazon OpenSearch Service.
The Amazon ES retry options.
" + "documentation":"The Amazon OpenSearch Service retry options.
" }, "S3BackupMode":{ "shape":"ElasticsearchS3BackupMode", @@ -1658,18 +1683,18 @@ "documentation":"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
" } }, - "documentation":"The destination description in Amazon ES.
" + "documentation":"The destination description in Amazon OpenSearch Service.
" }, "ElasticsearchDestinationUpdate":{ "type":"structure", "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeDomain
, DescribeDomains
, and DescribeDomainConfig
after assuming the IAM role specified in RoleARN
. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Specify either ClusterEndpoint
or DomainARN
.
The ARN of the Amazon OpenSearch Service domain. The IAM role must have permissions for DescribeDomain
, DescribeDomains
, and DescribeDomainConfig
after assuming the IAM role specified in RoleARN
. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Specify either ClusterEndpoint
or DomainARN
.
The Elasticsearch index rotation period. Index rotation appends a timestamp to IndexName
to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. Default value is OneDay
.
The Elasticsearch index rotation period. Index rotation appends a timestamp to IndexName
to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon OpenSearch Service Destination. Default value is OneDay
.
The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
" + "documentation":"The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
" }, "S3Update":{ "shape":"S3DestinationUpdate", @@ -1712,7 +1737,7 @@ "documentation":"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
" } }, - "documentation":"Describes an update for a destination in Amazon ES.
" + "documentation":"Describes an update for a destination in Amazon OpenSearch Service.
" }, "ElasticsearchDomainARN":{ "type":"string", @@ -1746,10 +1771,10 @@ "members":{ "DurationInSeconds":{ "shape":"ElasticsearchRetryDurationInSeconds", - "documentation":"After an initial failure to deliver to Amazon ES, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
" + "documentation":"After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
" } }, - "documentation":"Configures retry behavior in case Firehose is unable to deliver documents to Amazon ES.
" + "documentation":"Configures retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service.
" }, "ElasticsearchS3BackupMode":{ "type":"string", @@ -2339,11 +2364,11 @@ }, "SchemaEvolutionConfiguration":{ "shape":"SchemaEvolutionConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The configuration to enable automatic schema evolution.
Amazon Data Firehose is in preview release and is subject to change.
" }, "TableCreationConfiguration":{ "shape":"TableCreationConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The configuration to enable automatic table creation.
Amazon Data Firehose is in preview release and is subject to change.
" }, "BufferingHints":{"shape":"BufferingHints"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, @@ -2357,6 +2382,10 @@ "shape":"RoleARN", "documentation":"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.
" }, + "AppendOnly":{ + "shape":"BooleanObject", + "documentation":"Describes whether all incoming data for this delivery stream will be append only (inserts only and not for updates and deletes) for Iceberg delivery. This feature is only applicable for Apache Iceberg Tables.
The default value is false. If you set this value to true, Firehose automatically increases the throughput limit of a stream based on the throttling levels of the stream. If you set this parameter to true for a stream with updates and deletes, you will see out-of-order delivery.
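As a hedged illustration of this flag, the fragment below marks an Iceberg destination as append-only; the ARNs are placeholders and the remaining destination settings (buffering, processing, and so on) are omitted.

```python
# Hypothetical fragment of an IcebergDestinationConfiguration; ARNs are placeholders.
iceberg_destination = {
    "RoleARN": "arn:aws:iam::123456789012:role/firehose-iceberg-role",
    "CatalogConfiguration": {
        "CatalogARN": "arn:aws:glue:us-east-1:123456789012:catalog",
    },
    "S3Configuration": {
        "RoleARN": "arn:aws:iam::123456789012:role/firehose-iceberg-role",
        "BucketARN": "arn:aws:s3:::example-error-output-bucket",
    },
    # Inserts only. Leave this False (the default) for streams that carry updates
    # or deletes, otherwise delivery can arrive out of order.
    "AppendOnly": True,
}
```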
" + }, "CatalogConfiguration":{ "shape":"CatalogConfiguration", "documentation":"Configuration describing where the destination Apache Iceberg Tables are persisted.
" @@ -2374,11 +2403,11 @@ }, "SchemaEvolutionConfiguration":{ "shape":"SchemaEvolutionConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The description of automatic schema evolution configuration.
Amazon Data Firehose is in preview release and is subject to change.
" }, "TableCreationConfiguration":{ "shape":"TableCreationConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The description of table creation configuration.
Amazon Data Firehose is in preview release and is subject to change.
" }, "BufferingHints":{"shape":"BufferingHints"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, @@ -2392,6 +2421,10 @@ "shape":"RoleARN", "documentation":"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.
" }, + "AppendOnly":{ + "shape":"BooleanObject", + "documentation":"Describes whether all incoming data for this delivery stream will be append only (inserts only and not for updates and deletes) for Iceberg delivery. This feature is only applicable for Apache Iceberg Tables.
The default value is false. If you set this value to true, Firehose automatically increases the throughput limit of a stream based on the throttling levels of the stream. If you set this parameter to true for a stream with updates and deletes, you will see out-of-order delivery.
" + }, "CatalogConfiguration":{ "shape":"CatalogConfiguration", "documentation":"
Configuration describing where the destination Iceberg tables are persisted.
" @@ -2409,11 +2442,11 @@ }, "SchemaEvolutionConfiguration":{ "shape":"SchemaEvolutionConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The configuration to enable automatic schema evolution.
Amazon Data Firehose is in preview release and is subject to change.
" }, "TableCreationConfiguration":{ "shape":"TableCreationConfiguration", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The configuration to enable automatic table creation.
Amazon Data Firehose is in preview release and is subject to change.
" }, "BufferingHints":{"shape":"BufferingHints"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, @@ -2427,6 +2460,10 @@ "shape":"RoleARN", "documentation":"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.
" }, + "AppendOnly":{ + "shape":"BooleanObject", + "documentation":"Describes whether all incoming data for this delivery stream will be append only (inserts only and not for updates and deletes) for Iceberg delivery. This feature is only applicable for Apache Iceberg Tables.
The default value is false. If you set this value to true, Firehose automatically increases the throughput limit of a stream based on the throttling levels of the stream. If you set this parameter to true for a stream with updates and deletes, you will see out-of-order delivery.
" + }, "CatalogConfiguration":{ "shape":"CatalogConfiguration", "documentation":"Configuration describing where the destination Iceberg tables are persisted.
" @@ -2544,7 +2581,7 @@ "documentation":"Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.
" } }, - "documentation":"Details about a Kinesis data stream used as the source for a Firehose Firehose stream.
" + "documentation":"Details about a Kinesis data stream used as the source for a Firehose stream.
" }, "LimitExceededException":{ "type":"structure", @@ -2719,7 +2756,7 @@ "documentation":"The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read. By default, this is set to timestamp when Firehose becomes Active.
If you want to create a Firehose stream with Earliest start position from SDK or CLI, you need to set the ReadFromTimestampUTC
parameter to Epoch (1970-01-01T00:00:00Z).
Details about the Amazon MSK cluster used as the source for a Firehose Firehose stream.
" + "documentation":"Details about the Amazon MSK cluster used as the source for a Firehose stream.
" }, "NoEncryptionConfig":{ "type":"string", @@ -2893,10 +2930,10 @@ "members":{ "SourceName":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The column name to be configured in partition spec.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"Represents a single field in a PartitionSpec
.
Amazon Data Firehose is in preview release and is subject to change.
" }, "PartitionFields":{ "type":"list", @@ -2907,10 +2944,10 @@ "members":{ "Identity":{ "shape":"PartitionFields", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"List of identity transforms that performs an identity transformation. The transform takes the source value, and does not modify it. Result type is the source type.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"Represents how to produce partition data for a table. Partition data is produced by transforming columns in a table. Each column transform is represented by a named PartitionField
.
Here is an example of the schema in JSON.
\"partitionSpec\": { \"identity\": [ {\"sourceName\": \"column1\"}, {\"sourceName\": \"column2\"}, {\"sourceName\": \"column3\"} ] }
Amazon Data Firehose is in preview release and is subject to change.
" }, "Password":{ "type":"string", @@ -3543,10 +3580,10 @@ "members":{ "Enabled":{ "shape":"BooleanObject", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"Specify whether you want to enable schema evolution.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The configuration to enable schema evolution.
Amazon Data Firehose is in preview release and is subject to change.
" }, "SecretARN":{ "type":"string", @@ -3584,14 +3621,14 @@ "members":{ "ParquetSerDe":{ "shape":"ParquetSerDe", - "documentation":"A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet.
" + "documentation":"A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet.
" }, "OrcSerDe":{ "shape":"OrcSerDe", "documentation":"A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC.
" } }, - "documentation":"The serializer that you want Firehose to use to convert data to the target format before writing it to Amazon S3. Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.
" + "documentation":"The serializer that you want Firehose to use to convert data to the target format before writing it to Amazon S3. Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.
" }, "ServiceUnavailableException":{ "type":"structure", @@ -3725,11 +3762,11 @@ }, "MetaDataColumnName":{ "shape":"SnowflakeMetaDataColumnName", - "documentation":"The name of the record metadata column
" + "documentation":"Specify a column name in the table, where the metadata information has to be loaded. When you enable this field, you will see the following column in the snowflake table, which differs based on the source type.
For Direct PUT as source
{ \"firehoseDeliveryStreamName\" : \"streamname\", \"IngestionTime\" : \"timestamp\" }
For Kinesis Data Stream as source
\"kinesisStreamName\" : \"streamname\", \"kinesisShardId\" : \"Id\", \"kinesisPartitionKey\" : \"key\", \"kinesisSequenceNumber\" : \"1234\", \"subsequenceNumber\" : \"2334\", \"IngestionTime\" : \"timestamp\" }
The name of the record content column
" + "documentation":"The name of the record content column.
" }, "SnowflakeVpcConfiguration":{ "shape":"SnowflakeVpcConfiguration", @@ -4003,6 +4040,10 @@ "SourceDescription":{ "type":"structure", "members":{ + "DirectPutSourceDescription":{ + "shape":"DirectPutSourceDescription", + "documentation":"Details about Direct PUT used as the source for a Firehose stream.
" + }, "KinesisStreamSourceDescription":{ "shape":"KinesisStreamSourceDescription", "documentation":"The KinesisStreamSourceDescription value for the source Kinesis data stream.
" @@ -4013,10 +4054,10 @@ }, "DatabaseSourceDescription":{ "shape":"DatabaseSourceDescription", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"Details about a database used as the source for a Firehose stream.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"Details about a Kinesis data stream used as the source for a Firehose Firehose stream.
" + "documentation":"Details about a Kinesis data stream used as the source for a Firehose stream.
" }, "SplunkBufferingHints":{ "type":"structure", @@ -4271,10 +4312,10 @@ "members":{ "Enabled":{ "shape":"BooleanObject", - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"Specify whether you want to enable automatic table creation.
Amazon Data Firehose is in preview release and is subject to change.
" } }, - "documentation":"
Amazon Data Firehose is in preview release and is subject to change.
" + "documentation":"The configuration to enable automatic table creation.
Amazon Data Firehose is in preview release and is subject to change.
" }, "Tag":{ "type":"structure", @@ -4337,6 +4378,11 @@ "min":0, "pattern":"^[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*$" }, + "ThroughputHintInMBs":{ + "type":"integer", + "max":100, + "min":1 + }, "Timestamp":{"type":"timestamp"}, "TopicName":{ "type":"string", @@ -4401,7 +4447,7 @@ }, "ElasticsearchDestinationUpdate":{ "shape":"ElasticsearchDestinationUpdate", - "documentation":"Describes an update for a destination in Amazon ES.
" + "documentation":"Describes an update for a destination in Amazon OpenSearch Service.
" }, "AmazonopensearchserviceDestinationUpdate":{ "shape":"AmazonopensearchserviceDestinationUpdate", @@ -4451,7 +4497,7 @@ "members":{ "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"The IDs of the subnets that you want Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this Firehose stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
" + "documentation":"The IDs of the subnets that you want Firehose to use to create ENIs in the VPC of the Amazon OpenSearch Service destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon OpenSearch Service endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this Firehose stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
" }, "RoleARN":{ "shape":"RoleARN", @@ -4459,7 +4505,7 @@ }, "SecurityGroupIds":{ "shape":"SecurityGroupIdList", - "documentation":"The IDs of the security groups that you want Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
" + "documentation":"The IDs of the security groups that you want Firehose to use when it creates ENIs in the VPC of the Amazon OpenSearch Service destination. You can use the same security group that the Amazon OpenSearch Service domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon OpenSearch Service domain's security group. Also ensure that the Amazon OpenSearch Service domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon OpenSearch Service domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
" } }, "documentation":"The details of the VPC of the Amazon OpenSearch or Amazon OpenSearch Serverless destination.
" @@ -4475,7 +4521,7 @@ "members":{ "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"The IDs of the subnets that Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this Firehose stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
" + "documentation":"The IDs of the subnets that Firehose uses to create ENIs in the VPC of the Amazon OpenSearch Service destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon OpenSearch Service endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this Firehose stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
" }, "RoleARN":{ "shape":"RoleARN", @@ -4483,14 +4529,14 @@ }, "SecurityGroupIds":{ "shape":"SecurityGroupIdList", - "documentation":"The IDs of the security groups that Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your Firehose stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
" + "documentation":"The IDs of the security groups that Firehose uses when it creates ENIs in the VPC of the Amazon OpenSearch Service destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon OpenSearch Service domain's security group. Also ensure that the Amazon OpenSearch Service domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your Firehose stream and the Amazon OpenSearch Service domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
" }, "VpcId":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"The ID of the Amazon ES destination's VPC.
" + "documentation":"The ID of the Amazon OpenSearch Service destination's VPC.
" } }, - "documentation":"The details of the VPC of the Amazon ES destination.
" + "documentation":"The details of the VPC of the Amazon OpenSearch Service destination.
" }, "VpcEndpointServiceName":{ "type":"string", diff --git a/botocore/data/fms/2018-01-01/service-2.json b/botocore/data/fms/2018-01-01/service-2.json index 8dbb9fdef9..70a0bae6c6 100644 --- a/botocore/data/fms/2018-01-01/service-2.json +++ b/botocore/data/fms/2018-01-01/service-2.json @@ -3211,6 +3211,10 @@ "PolicyStatus":{ "shape":"CustomerPolicyStatus", "documentation":"Indicates whether the policy is in or out of an admin's policy or Region scope.
ACTIVE
- The administrator can manage and delete the policy.
OUT_OF_ADMIN_SCOPE
- The administrator can view the policy, but they can't edit or delete the policy. Existing policy protections stay in place. Any new resources that come into scope of the policy won't be protected.
Specifies whether to combine multiple resource tags with AND, so that a resource must have all tags to be included or excluded, or OR, so that a resource must have at least one tag.
Default: AND
A Firewall Manager policy.
" @@ -3925,18 +3929,25 @@ "documentation":"The resource tag value. To specify an empty string value, either don't provide this or specify it as \"\".
" } }, - "documentation":"The resource tags that Firewall Manager uses to determine if a particular resource should be included or excluded from the Firewall Manager policy. Tags enable you to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value. Firewall Manager combines the tags with \"AND\" so that, if you add more than one tag to a policy scope, a resource must have all the specified tags to be included or excluded. For more information, see Working with Tag Editor.
Every resource tag must have a string value, either a non-empty string or an empty string. If you don't provide a value for a resource tag, Firewall Manager saves the value as an empty string: \"\". When Firewall Manager compares tags, it only matches two tags if they have the same key and the same value. A tag with an empty string value only matches with tags that also have an empty string value.
" + "documentation":"The resource tags that Firewall Manager uses to determine if a particular resource should be included or excluded from the Firewall Manager policy. Tags enable you to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value. If you add more than one tag to a policy, you can specify whether to combine them using the logical AND operator or the logical OR operator. For more information, see Working with Tag Editor.
Every resource tag must have a string value, either a non-empty string or an empty string. If you don't provide a value for a resource tag, Firewall Manager saves the value as an empty string: \"\". When Firewall Manager compares tags, it only matches two tags if they have the same key and the same value. A tag with an empty string value only matches with tags that also have an empty string value.
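As a hedged illustration of the new OR behavior, the sketch below scopes a policy to resources that carry either of two tags; it assumes ResourceTagLogicalOperator is exposed on the Policy structure, and the policy type, managed service data, and tag values are placeholders rather than a recommended configuration.

```python
import boto3

fms = boto3.client("fms")

fms.put_policy(
    Policy={
        "PolicyName": "example-tag-scoped-policy",
        "SecurityServicePolicyData": {
            "Type": "SECURITY_GROUPS_USAGE_AUDIT",
            # Placeholder details; a real policy needs complete JSON here.
            "ManagedServiceData": '{"type":"SECURITY_GROUPS_USAGE_AUDIT","deleteUnusedSecurityGroups":false,"coalesceRedundantSecurityGroups":false}',
        },
        "ResourceType": "AWS::EC2::SecurityGroup",
        "ExcludeResourceTags": False,
        "RemediationEnabled": False,
        # A resource is in scope if it has either tag, rather than both.
        "ResourceTags": [
            {"Key": "env", "Value": "prod"},
            {"Key": "team", "Value": "payments"},
        ],
        "ResourceTagLogicalOperator": "OR",
    }
)
```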
" }, "ResourceTagKey":{ "type":"string", "max":128, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@*\\\\]*)$" + }, + "ResourceTagLogicalOperator":{ + "type":"string", + "enum":[ + "AND", + "OR" + ] }, "ResourceTagValue":{ "type":"string", "max":256, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@*\\\\]*)$" }, "ResourceTags":{ "type":"list", diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index e456a70e3b..208515f1fe 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -971,7 +971,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"Retrieves information, including current status, about a game session placement request.
To get game session placement details, specify the placement ID.
This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessionPlacement
should only be used for games in development with low game session usage.
Retrieves information, including current status, about a game session placement request.
To get game session placement details, specify the placement ID.
This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessionPlacement
should only be used for games in development with low game session usage.
Places a request for a new game session in a queue. When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.
A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.
When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.
Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players.
To place a new game session request, specify the following:
The queue name and a set of game session properties and settings
A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request
(Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player)
Latency data for all players (if you want to optimize game play for the players)
If successful, a new game session placement is created.
To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED
, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.
Makes a request to start a new game session using a game session queue. When processing a placement request in a queue, Amazon GameLift finds the best possible available resource to host the game session and prompts the resource to start the game session.
Request options
Call this API with the following minimum parameters: GameSessionQueueName, MaximumPlayerSessionCount, and PlacementID. You can also include game session data (data formatted as strings) or game properties (data formatted as key-value pairs) to pass to the new game session.
You can change how Amazon GameLift chooses a hosting resource for the new game session. Prioritizing resources for game session placements is defined when you configure a game session queue. You can use the default prioritization process or specify a custom process by providing a PriorityConfiguration when you create or update a queue.
Prioritize based on resource cost and location, using the queue's configured priority settings. Call this API with the minimum parameters.
Prioritize based on latency. Include a set of values for PlayerLatencies. You can provide latency data with or without player session data. This option instructs Amazon GameLift to reorder the queue's prioritized locations list based on the latency data. If latency data is provided for multiple players, Amazon GameLift calculates each location's average latency for all players and reorders to find the lowest latency across all players. Don't include latency data if you're providing a custom list of locations.
Prioritize based on a custom list of locations. If you're using a queue that's configured to prioritize location first (see PriorityConfiguration for game session queues), use the PriorityConfigurationOverride parameter to substitute a different location list for this placement request. When prioritizing placements by location, Amazon GameLift searches each location in prioritized order to find an available hosting resource for the new game session. You can choose whether to use the override list for the first placement attempt only or for all attempts.
You can request new player sessions for a group of players. Include the DesiredPlayerSessions parameter and include at minimum a unique player ID for each. You can also include player-specific data to pass to the new game session.
Result
If successful, this request generates a new game session placement request and adds it to the game session queue for Amazon GameLift to process in turn. You can track the status of individual placement requests by calling DescribeGameSessionPlacement. A new game session is running if the status is FULFILLED
and the request returns the game session connection information (IP address and port). If you include player session data, Amazon GameLift creates a player session for each player ID in the request.
The request results in a BadRequestException
in the following situations:
If the request includes both PlayerLatencies and PriorityConfigurationOverride parameters.
If the request includes the PriorityConfigurationOverride parameter and designates a queue that doesn't prioritize locations.
Amazon GameLift continues to retry each placement request until it reaches the queue's timeout setting. If a request times out, you can resubmit the request to the same queue or try a different queue.
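A minimal sketch of the location-override option described above, assuming the request shapes in this model; the queue name, locations, and the fallback strategy value are assumptions, not a definitive call pattern.

```python
import uuid

import boto3

gamelift = boto3.client("gamelift")

gamelift.start_game_session_placement(
    GameSessionQueueName="example-queue",   # queue configured to prioritize LOCATION first
    PlacementId=str(uuid.uuid4()),          # unique ID used to track this placement
    MaximumPlayerSessionCount=8,
    PriorityConfigurationOverride={
        # Try these locations, in order, for this request only.
        "LocationOrder": ["us-west-2", "us-east-1"],
        # Assumed value: fall back to the queue's own prioritization after one pass.
        "PlacementFallbackStrategy": "DEFAULT_AFTER_SINGLE_PASS",
    },
)
```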
" }, "StartMatchBackfill":{ "name":"StartMatchBackfill", @@ -1719,7 +1720,7 @@ {"shape":"InvalidGameSessionStatusException"}, {"shape":"NotReadyException"} ], - "documentation":"Ends a game session that's currently in progress. You can use this action to terminate any game session that isn't in TERMINATED
or TERMINATING
status. Terminating a game session is the most efficient way to free up a server process when it's hosting a game session that's in a bad state or not ending naturally. You can use this action to terminate a game session that's being hosted on any type of Amazon GameLift fleet compute, including computes for managed EC2, managed container, and Anywhere fleets.
There are two potential methods for terminating a game session:
With a graceful termination, the Amazon GameLift service prompts the server process to initiate its normal game session shutdown sequence. This sequence is implemented in the game server code and might involve a variety of actions to gracefully end a game session, such as notifying players, and stop the server process.
With a forceful termination, the Amazon GameLift service takes immediate action to terminate the game session by stopping the server process. Termination occurs without the normal game session shutdown sequence.
Request options
Request termination for a single game session. Provide the game session ID and the termination method.
Results
If successful, game session termination is initiated, which includes changing the game session status to TERMINATING
. As a result of this action, and depending on the implementation of OnProcessTerminate()
, the server process either becomes available to host a new game session, or it's recycled and a new server process started with availability to host a game session. The game session status is changed to TERMINATED
, with a status reason that indicates the termination method used.
Ends a game session that's currently in progress. Use this action to terminate any game session that isn't in ERROR
status. Terminating a game session is the most efficient way to free up a server process when it's hosting a game session that's in a bad state or not ending properly. You can use this action to terminate a game session that's being hosted on any type of Amazon GameLift fleet compute, including computes for managed EC2, managed container, and Anywhere fleets. The game server must be integrated with Amazon GameLift server SDK 5.x or greater.
Request options
Request termination for a single game session. Provide the game session ID and the termination mode. There are two potential methods for terminating a game session:
Initiate a graceful termination using the normal game session shutdown sequence. With this mode, the Amazon GameLift service prompts the server process that's hosting the game session by calling the server SDK callback method OnProcessTerminate()
. The callback implementation is part of the custom game server code. It might involve a variety of actions to gracefully end a game session, such as notifying players, before stopping the server process.
Force an immediate game session termination. With this mode, the Amazon GameLift service takes action to stop the server process, which ends the game session without the normal game session shutdown sequence.
Results
If successful, game session termination is initiated. During this activity, the game session status is changed to TERMINATING
. When completed, the server process that was hosting the game session has been stopped and replaced with a new server process that's ready to host a new game session. The old game session's status is changed to TERMINATED
with a status reason that indicates the termination method used.
Learn more
Add Amazon GameLift to your game server
Amazon GameLift server SDK 5 reference guide for OnProcessTerminate()
(C++) (C#) (Unreal) (Go)
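A minimal sketch of a graceful termination request as described above; the game session ARN is a placeholder and the TerminationMode values are assumptions based on this model.

```python
import boto3

gamelift = boto3.client("gamelift")

gamelift.terminate_game_session(
    GameSessionId="arn:aws:gamelift:us-west-2::gamesession/fleet-1234/gsess-5678",
    # Assumed modes: TRIGGER_ON_PROCESS_TERMINATE (graceful, via OnProcessTerminate())
    # or FORCE_TERMINATE (stop the server process immediately).
    TerminationMode="TRIGGER_ON_PROCESS_TERMINATE",
)
```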
The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912
. In a GameLift fleet ARN, the resource ID matches the FleetId
value.
The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. See Set up an IAM service role. This fleet property can't be changed.
" }, "GameServerContainerGroupDefinitionName":{ @@ -3027,7 +3028,7 @@ "required":["FleetRoleArn"], "members":{ "FleetRoleArn":{ - "shape":"ArnStringModel", + "shape":"IamRoleArn", "documentation":"The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. Use an IAM service role with the GameLiftContainerFleetPolicy
managed policy attached. For more information, see Set up an IAM service role. You can't change this fleet property after the fleet is created.
IAM role ARN values use the following pattern: arn:aws:iam::[Amazon Web Services account]:role/[role name]
.
A list of locations to allow game session placement in, in the form of Amazon Web Services Region codes such as us-west-2
.
A list of fleet locations where a game session queue can place new game sessions. You can use a filter to temporarily turn off placements for specific locations. For queues that have multi-location fleets, you can use a filter configuration allow placement with some, but not all of these locations.
" + "documentation":"A list of fleet locations where a game session queue can place new game sessions. You can use a filter to temporarily exclude specific locations from receiving placements. For queues that have multi-location fleets, you can use a filter configuration allow placement with some, but not all, of a fleet's locations.
" }, "FilterInstanceStatus":{ "type":"string", @@ -6443,7 +6444,7 @@ }, "PlayerLatencies":{ "shape":"PlayerLatencyList", - "documentation":"A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions.
" + "documentation":"A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions.
" }, "StartTime":{ "shape":"Timestamp", @@ -6476,9 +6477,13 @@ "MatchmakerData":{ "shape":"MatchmakerData", "documentation":"Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.
" + }, + "PriorityConfigurationOverride":{ + "shape":"PriorityConfigurationOverride", + "documentation":"A prioritized list of locations to use with a game session placement request and instructions on how to use it. This list overrides a queue's prioritized location list for a single game session placement request only. The list can include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). The fallback strategy instructs Amazon GameLift to use the override list for the first placement attempt only or for all placement attempts.
" } }, - "documentation":"Represents a potential game session placement, including the full details of the original placement request and the current status.
If the game session placement status is PENDING
, the properties for game session ID/ARN, region, IP address/DNS, and port aren't final. A game session is not active and ready to accept players until placement status reaches FULFILLED
. When the placement is in PENDING
status, Amazon GameLift may attempt to place a game session multiple times before succeeding. With each attempt it creates a https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameSession object and updates this placement object with the new game session properties..
Represents a potential game session placement, including the full details of the original placement request and the current status.
If the game session placement status is PENDING
, the properties for game session ID/ARN, region, IP address/DNS, and port aren't final. A game session is not active and ready to accept players until placement status reaches FULFILLED
. When the placement is in PENDING
status, Amazon GameLift may attempt to place a game session multiple times before succeeding. With each attempt it creates a https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameSession object and updates this placement object with the new game session properties.
A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions. If this property is present, FlexMatch considers placing the match only in Regions for which latency is reported.
If a matchmaker has a rule that evaluates player latency, players must report latency in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that no Regions are available to the player and the ticket is not matchable.
" + "documentation":"A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. If this property is present, FlexMatch considers placing the match only in Regions for which latency is reported.
If a matchmaker has a rule that evaluates player latency, players must report latency in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that no Regions are available to the player and the ticket is not matchable.
" } }, "documentation":"Represents a player in matchmaking. When starting a matchmaking request, a player has a player ID, attributes, and may have latency data. Team information is added after a match has been successfully completed.
" @@ -8213,14 +8231,29 @@ "members":{ "PriorityOrder":{ "shape":"PriorityTypeList", - "documentation":"The recommended sequence to use when prioritizing where to place new game sessions. Each type can only be listed once.
LATENCY
-- FleetIQ prioritizes locations where the average player latency (provided in each game session request) is lowest.
COST
-- FleetIQ prioritizes destinations with the lowest current hosting costs. Cost is evaluated based on the location, instance type, and fleet type (Spot or On-Demand) for each destination in the queue.
DESTINATION
-- FleetIQ prioritizes based on the order that destinations are listed in the queue configuration.
LOCATION
-- FleetIQ prioritizes based on the provided order of locations, as defined in LocationOrder
.
A custom sequence to use when prioritizing where to place new game sessions. Each priority type is listed once.
LATENCY
-- Amazon GameLift prioritizes locations where the average player latency is lowest. Player latency data is provided in each game session placement request.
COST
-- Amazon GameLift prioritizes destinations with the lowest current hosting costs. Cost is evaluated based on the location, instance type, and fleet type (Spot or On-Demand) of each destination in the queue.
DESTINATION
-- Amazon GameLift prioritizes based on the list order of destinations in the queue configuration.
LOCATION
-- Amazon GameLift prioritizes based on the provided order of locations, as defined in LocationOrder
.
The prioritization order to use for fleet locations, when the PriorityOrder
property includes LOCATION
. Locations are identified by Amazon Web Services Region codes such as us-west-2
. Each location can only be listed once.
The prioritization order to use for fleet locations, when the PriorityOrder
property includes LOCATION
. Locations can include Amazon Web Services Region codes (such as us-west-2
), local zones, and custom locations (for Anywhere fleets). Each location must be listed only once. For details, see Amazon GameLift service locations.
Custom prioritization settings for a game session queue to use when searching for available game servers to place new game sessions. This configuration replaces the default FleetIQ prioritization process.
By default, a queue makes placements based on the following default prioritizations:
If player latency data is included in a game session request, Amazon GameLift prioritizes placing game sessions where the average player latency is lowest. Amazon GameLift re-orders the queue's destinations and locations (for multi-location fleets) based on the following priorities: (1) the lowest average latency across all players, (2) the lowest hosting cost, (3) the queue's default destination order, and then (4), an alphabetic list of locations.
If player latency data is not included, Amazon GameLift prioritizes placing game sessions in the queue's first destination. If that fleet has multiple locations, the game session is placed on the first location (when listed alphabetically). Amazon GameLift re-orders the queue's destinations and locations (for multi-location fleets) based on the following priorities: (1) the queue's default destination order, and then (2) an alphabetic list of locations.
Instructions for how to use the override list if the first round of placement attempts fails. The first round is a failure if Amazon GameLift searches all listed locations, in all of the queue's destinations, without finding an available hosting resource for a new game session. Valid strategies include:
DEFAULT_AFTER_SINGLE_PASS
-- After the first round of placement attempts, discard the override list and use the queue's default location priority list. Continue to use the queue's default list until the placement request times out.
NONE
-- Continue to use the override list for all rounds of placement attempts until the placement request times out.
A prioritized list of hosting locations. The list can include Amazon Web Services Regions (such as us-west-2
), local zones, and custom locations (for Anywhere fleets). Each location must be listed only once. For details, see Amazon GameLift service locations.
Custom prioritization settings for use by a game session queue when placing new game sessions with available game servers. When defined, this configuration replaces the default FleetIQ prioritization process, which is as follows:
If player latency data is included in a game session request, destinations and locations are prioritized first based on lowest average latency (1), then on lowest hosting cost (2), then on destination list order (3), and finally on location (alphabetical) (4). This approach ensures that the queue's top priority is to place game sessions where average player latency is lowest, and--if latency is the same--where the hosting cost is less, etc.
If player latency data is not included, destinations and locations are prioritized first on destination list order (1), and then on location (alphabetical) (2). This approach ensures that the queue's top priority is to place game sessions on the first destination fleet listed. If that fleet has multiple locations, the game session is placed on the first location (when listed alphabetically).
Changing the priority order will affect how game sessions are placed.
" + "documentation":"An alternate list of prioritized locations for use with a game session queue. When this property is included in a StartGameSessionPlacement request, this list overrides the queue's default location prioritization, as defined in the queue's PriorityConfiguration setting (LocationOrder). This property overrides the queue's default priority list for individual placement requests only. Use this property only with queues that have a PriorityConfiguration
setting that prioritizes LOCATION first.
A priority configuration override list does not override a queue's FilterConfiguration setting, if the queue has one. Filter configurations are used to limit placements to a subset of the locations in a queue's destinations. If the override list includes a location that's not included in the FilterConfiguration allowed list, Amazon GameLift won't attempt to place a game session there.
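[Editor's illustrative note] To make the per-request override concrete, here is a minimal, hedged boto3 sketch. LocationOrder is referenced in the documentation above; the fallback-strategy field name (written as PlacementFallbackStrategy below) and the queue/location values are assumptions, not confirmed by this excerpt.

```python
# Hedged sketch: a single placement request that overrides the queue's
# default location priority list. Field name "PlacementFallbackStrategy"
# and all ARNs/names are assumptions or placeholders.
import uuid
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

response = gamelift.start_game_session_placement(
    PlacementId=str(uuid.uuid4()),
    GameSessionQueueName="my-queue",            # hypothetical queue name
    MaximumPlayerSessionCount=8,
    PriorityConfigurationOverride={
        # Applies to this placement request only.
        "LocationOrder": ["us-west-2", "us-east-1"],
        # DEFAULT_AFTER_SINGLE_PASS: use the override for the first pass only,
        # then fall back to the queue's own priority list; NONE keeps it until timeout.
        "PlacementFallbackStrategy": "DEFAULT_AFTER_SINGLE_PASS",
    },
)
print(response["GameSessionPlacement"]["Status"])
```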
A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.
" + "documentation":"A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.
" }, "DesiredPlayerSessions":{ "shape":"DesiredPlayerSessionList", @@ -8872,6 +8905,10 @@ "GameSessionData":{ "shape":"LargeGameSessionData", "documentation":"A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session. For more information, see Start a game session.
" + }, + "PriorityConfigurationOverride":{ + "shape":"PriorityConfigurationOverride", + "documentation":"A prioritized list of locations to use for the game session placement and instructions on how to use it. This list overrides a queue's prioritized location list for this game session placement request only. You can include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). Choose a fallback strategy to instruct Amazon GameLift to use the override list for the first placement attempt only or for all placement attempts.
" } } }, @@ -9272,7 +9309,7 @@ }, "TerminationMode":{ "shape":"TerminationMode", - "documentation":"The method to use to terminate the game session. Available methods include:
TRIGGER_ON_PROCESS_TERMINATE
– Sends an OnProcessTerminate()
callback to the server process to initiate the normal game session shutdown sequence. At a minimum, the callback method must include a call to the server SDK action ProcessEnding()
, which is how the server process signals that a game session is ending. If the server process doesn't call ProcessEnding()
, this termination method won't be successful.
FORCE_TERMINATE
– Takes action to stop the server process, using existing methods to control how server processes run on an Amazon GameLift managed compute.
This method is not available for game sessions that are running on Anywhere fleets unless the fleet is deployed with the Amazon GameLift Agent. In this scenario, a force terminate request results in an invalid or bad request exception.
The method to use to terminate the game session. Available methods include:
TRIGGER_ON_PROCESS_TERMINATE
– Prompts the Amazon GameLift service to send an OnProcessTerminate()
callback to the server process and initiate the normal game session shutdown sequence. The OnProcessTerminate
method, which is implemented in the game server code, must include a call to the server SDK action ProcessEnding()
, which is how the server process signals to Amazon GameLift that a game session is ending. If the server process doesn't call ProcessEnding()
, the game session termination won't conclude successfully.
FORCE_TERMINATE
– Prompts the Amazon GameLift service to stop the server process immediately. Amazon GameLift takes action (depending on the type of fleet) to shut down the server process without the normal game session shutdown sequence.
This method is not available for game sessions that are running on Anywhere fleets unless the fleet is deployed with the Amazon GameLift Agent. In this scenario, a force terminate request results in an invalid or bad request exception.
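[Editor's illustrative note] The two termination modes are easier to read in context. The sketch below assumes the parameter belongs to GameLift's TerminateGameSession operation (the operation name is not shown in this excerpt); the game session ARN is a placeholder.

```python
# Hedged sketch: request a graceful shutdown of a game session.
# TRIGGER_ON_PROCESS_TERMINATE asks the server process to run its normal
# shutdown sequence (the game server must call ProcessEnding());
# FORCE_TERMINATE would stop the server process immediately instead.
import boto3

gamelift = boto3.client("gamelift")

gamelift.terminate_game_session(
    GameSessionId="arn:aws:gamelift:us-west-2::gamesession/fleet-1234/example-session",  # placeholder
    TerminationMode="TRIGGER_ON_PROCESS_TERMINATE",
)
```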
Calculates route matrix containing the results for all pairs of Origins to Destinations. Each row corresponds to one entry in Origins. Each entry in the row corresponds to the route from that entry in Origins to an entry in Destinations positions.
" + "documentation":" Use CalculateRouteMatrix
to compute results for all pairs of Origins to Destinations. Each row corresponds to one entry in Origins. Each entry in the row corresponds to the route from that entry in Origins to an entry in Destinations positions.
Calculates a route given the following required parameters: Origin
and Destination
.
CalculateRoutes
computes routes given the following required parameters: Origin
and Destination
.
Calculates the optimal order to travel between a set of waypoints to minimize either the travel time or the distance travelled during the journey, based on road network restrictions and the traffic pattern data.
" + "documentation":" OptimizeWaypoints
calculates the optimal order to travel between a set of waypoints to minimize either the travel time or the distance travelled during the journey, based on road network restrictions and the traffic pattern data.
The SnapToRoads action matches GPS trace to roads most likely traveled on.
" + "documentation":" SnapToRoads
matches GPS trace to roads most likely traveled on.
Features that are allowed while calculating. a route
" + "documentation":"Features that are allowed while calculating an isoline.
" }, "ArrivalTime":{ "shape":"TimestampWithTimezoneOffset", @@ -175,7 +175,7 @@ }, "IsolineGranularity":{ "shape":"IsolineGranularityOptions", - "documentation":"Defines the granularity of the returned Isoline
" + "documentation":"Defines the granularity of the returned Isoline.
" }, "Key":{ "shape":"ApiKey", @@ -201,7 +201,7 @@ }, "Thresholds":{ "shape":"IsolineThresholds", - "documentation":"Threshold to be used for the isoline calculation. Up to 3 thresholds per provided type can be requested.
" + "documentation":"Threshold to be used for the isoline calculation. Up to 3 thresholds per provided type can be requested.
You incur a calculation charge for each threshold. Using a large amount of thresholds in a request can lead you to incur unexpected charges. See Amazon Location's pricing page for more information.
" }, "Traffic":{ "shape":"IsolineTrafficOptions", @@ -267,7 +267,7 @@ "members":{ "Allow":{ "shape":"RouteMatrixAllowOptions", - "documentation":"Features that are allowed while calculating. a route
" + "documentation":"Features that are allowed while calculating a route.
" }, "Avoid":{ "shape":"RouteMatrixAvoidanceOptions", @@ -283,7 +283,7 @@ }, "Destinations":{ "shape":"CalculateRouteMatrixRequestDestinationsList", - "documentation":"List of destinations for the route.
" + "documentation":"List of destinations for the route.
Route calculations are billed for each origin and destination pair. If you use a large matrix of origins and destinations, your costs will increase accordingly. See Amazon Location's pricing page for more information.
The position in longitude and latitude for the origin.
" + "documentation":"The position in longitude and latitude for the origin.
Route calculations are billed for each origin and destination pair. Using a large amount of Origins in a request can lead you to incur unexpected charges. See Amazon Location's pricing page for more information.
Features that are allowed while calculating. a route
" + "documentation":"Features that are allowed while calculating a route.
" }, "ArrivalTime":{ "shape":"TimestampWithTimezoneOffset", @@ -468,7 +468,7 @@ }, "TravelStepType":{ "shape":"RouteTravelStepType", - "documentation":"Type of step returned by the response. Default provides basic steps intended for web based applications. TurnByTurn provides detailed instructions with more granularity intended for a turn based naviagtion system.
" + "documentation":"Type of step returned by the response. Default provides basic steps intended for web based applications. TurnByTurn provides detailed instructions with more granularity intended for a turn based navigation system.
" }, "Waypoints":{ "shape":"RouteWaypointList", @@ -536,6 +536,11 @@ "documentation":"Geometry defined as a circle. When request routing boundary was set as AutoCircle
, the response routing boundary will return Circle
derived from the AutoCircle
settings.
Allow Hot (High Occupancy Toll) lanes while calculating the route.
" + "documentation":"Allow Hot (High Occupancy Toll) lanes while calculating an isoline.
Default value: false
Allow Hov (High Occupancy vehicle) lanes while calculating the route.
" + "documentation":"Allow Hov (High Occupancy vehicle) lanes while calculating an isoline.
Default value: false
Features that are allowed while calculating. a route
" + "documentation":"Features that are allowed while calculating an isoline.
" }, "IsolineAvoidanceArea":{ "type":"structure", @@ -702,7 +707,7 @@ "members":{ "Except":{ "shape":"IsolineAvoidanceAreaGeometryList", - "documentation":"Exceptions to the provided avoidance geometry, to be included while calculating the route.
" + "documentation":"Exceptions to the provided avoidance geometry, to be included while calculating an isoline.
" }, "Geometry":{ "shape":"IsolineAvoidanceAreaGeometry", @@ -735,7 +740,7 @@ "documentation":"A list of PolylinePolygon's that are excluded for calculating isolines, the list can only contain 1 polygon. For more information on polyline encoding, see https://github.com/heremaps/flexiblepolyline/blob/master/README.md.
" } }, - "documentation":"The avoidance geometry, to be included while calculating the route.
" + "documentation":"The avoidance geometry, to be included while calculating an isoline.
" }, "IsolineAvoidanceAreaGeometryList":{ "type":"list", @@ -766,23 +771,23 @@ }, "CarShuttleTrains":{ "shape":"Boolean", - "documentation":"Avoid car-shuttle-trains while calculating the route.
" + "documentation":"Avoid car-shuttle-trains while calculating an isoline.
" }, "ControlledAccessHighways":{ "shape":"Boolean", - "documentation":"Avoid controlled access highways while calculating the route.
" + "documentation":"Avoid controlled access highways while calculating an isoline.
" }, "DirtRoads":{ "shape":"Boolean", - "documentation":"Avoid dirt roads while calculating the route.
" + "documentation":"Avoid dirt roads while calculating an isoline.
" }, "Ferries":{ "shape":"Boolean", - "documentation":"Avoid ferries while calculating the route.
" + "documentation":"Avoid ferries while calculating an isoline.
" }, "SeasonalClosure":{ "shape":"Boolean", - "documentation":"Avoid roads that have seasonal closure while calculating the route.
" + "documentation":"Avoid roads that have seasonal closure while calculating an isoline.
" }, "TollRoads":{ "shape":"Boolean", @@ -798,7 +803,7 @@ }, "Tunnels":{ "shape":"Boolean", - "documentation":"Avoid tunnels while calculating the route.
" + "documentation":"Avoid tunnels while calculating an isoline.
" }, "UTurns":{ "shape":"Boolean", @@ -809,7 +814,7 @@ "documentation":"Zone categories to be avoided.
" } }, - "documentation":"Features that are avoided while calculating a route. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, it violates the avoidance and the returned response produces a notice for the violation.
" + "documentation":"Features that are avoided while calculating isolines. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, it violates the avoidance and the returned response produces a notice for the violation.
" }, "IsolineAvoidanceZoneCategory":{ "type":"structure", @@ -848,7 +853,7 @@ "documentation":"The number of occupants in the vehicle.
Default Value: 1
Options for vehicles.
" + "documentation":"Travel mode options when the provided travel mode is Car
.
An ordered list of positions used to plot a route on a map in a lossy compression format.
LineString and Polyline are mutually exclusive properties.
Geometry of the connection between different Isoline components.
" + "documentation":"Geometry of the connection between different isoline components.
" }, "IsolineConnectionList":{ "type":"list", @@ -950,7 +955,7 @@ }, "MaxResolution":{ "shape":"DistanceMeters", - "documentation":"Maximum resolution of the returned isoline.
Unit: centimeters
Maximum resolution of the returned isoline.
Unit: meters
Isoline granularity related options.
" @@ -1038,7 +1043,7 @@ "documentation":"Options to configure matching the provided position to a side of the street.
" } }, - "documentation":"Options for the property.
" + "documentation":"Origin related options.
" }, "IsolineScooterOptions":{ "type":"structure", @@ -1061,7 +1066,7 @@ "documentation":"The number of occupants in the vehicle.
Default Value: 1
Options for the property.
" + "documentation":"Travel mode options when the provided travel mode is Scooter
Time to be used for the isoline calculation.
" } }, - "documentation":"Threshold to be used for the isoline calculation. Up to 3 thresholds per provided type can be requested.
" + "documentation":"Threshold to be used for the isoline calculation. Up to 5 thresholds per provided type can be requested.
" }, "IsolineTrafficOptions":{ "type":"structure", @@ -1177,7 +1182,7 @@ }, "Scooter":{ "shape":"IsolineScooterOptions", - "documentation":"Travel mode options when the provided travel mode is \"Scooter\"
" + "documentation":"Travel mode options when the provided travel mode is Scooter
When travel mode is set to Scooter
, then the avoidance option ControlledAccessHighways
defaults to true
.
Features that are avoided while calculating a route. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, this setting is ignored.
" + "documentation":"Features that are avoided. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, this setting is ignored.
" + }, + "Clustering":{ + "shape":"WaypointOptimizationClusteringOptions", + "documentation":"Clustering allows you to specify how nearby waypoints can be clustered to improve the optimized sequence.
" }, "DepartureTime":{ "shape":"TimestampWithTimezoneOffset", @@ -1804,14 +1813,14 @@ "members":{ "Hot":{ "shape":"Boolean", - "documentation":"Allow Hot (High Occupancy Toll) lanes while calculating the route.
" + "documentation":"Allow Hot (High Occupancy Toll) lanes while calculating the route.
Default value: false
Allow Hov (High Occupancy vehicle) lanes while calculating the route.
" + "documentation":"Allow Hov (High Occupancy vehicle) lanes while calculating the route.
Default value: false
Features that are allowed while calculating. a route
" + "documentation":"Features that are allowed while calculating a route.
" }, "RouteAvoidanceArea":{ "type":"structure", @@ -1923,7 +1932,7 @@ "documentation":"Zone categories to be avoided.
" } }, - "documentation":"Options related to areas to be avoided.
" + "documentation":"Specifies options for areas to avoid when calculating the route. This is a best-effort avoidance setting, meaning the router will try to honor the avoidance preferences but may still include restricted areas if no feasible alternative route exists. If avoidance options are not followed, the response will indicate that the avoidance criteria were violated.
" }, "RouteAvoidanceZoneCategory":{ "type":"structure", @@ -1963,7 +1972,7 @@ "documentation":"The number of occupants in the vehicle.
Default Value: 1
Travel mode options when the provided travel mode is \"Car\"
" + "documentation":"Travel mode options when the provided travel mode is Car
.
List of countries to be avoided defined by two-letter or three-letter country codes.
" } }, - "documentation":"Exclusion options for the route.
" + "documentation":"Specifies strict exclusion options for the route calculation. This setting mandates that the router will avoid any routes that include the specified options, rather than merely attempting to minimize them.
" }, "RouteExitStepDetails":{ "type":"structure", @@ -2342,7 +2351,8 @@ "NoSchedule", "Other", "ViolatedAvoidFerry", - "ViolatedAvoidRailFerry" + "ViolatedAvoidRailFerry", + "SeasonalClosure" ] }, "RouteFerryNoticeList":{ @@ -2404,7 +2414,7 @@ }, "Distance":{ "shape":"DistanceMeters", - "documentation":"Distance of the computed span. This feature doesn't split a span, but is always computed on a span split by other properties.
" + "documentation":"Distance of the computed span. This feature doesn't split a span, but is always computed on a span split by other properties.
Unit: meters
Allow Hot (High Occupancy Toll) lanes while calculating the route.
" + "documentation":"Allow Hot (High Occupancy Toll) lanes while calculating the route.
Default value: false
Allow Hov (High Occupancy vehicle) lanes while calculating the route.
" + "documentation":"Allow Hov (High Occupancy vehicle) lanes while calculating the route.
Default value: false
Allow Options related to the route matrix.
" @@ -2834,7 +2845,7 @@ "documentation":"Zone categories to be avoided.
" } }, - "documentation":"Options related to the route matrix.
" + "documentation":"Specifies options for areas to avoid when calculating the route. This is a best-effort avoidance setting, meaning the router will try to honor the avoidance preferences but may still include restricted areas if no feasible alternative route exists. If avoidance options are not followed, the response will indicate that the avoidance criteria were violated.
" }, "RouteMatrixAvoidanceOptionsAreasList":{ "type":"list", @@ -2917,7 +2928,7 @@ "documentation":"The number of occupants in the vehicle.
Default Value: 1
Options related to the car.
" + "documentation":"Travel mode options when the provided travel mode is Car
.
List of countries to be avoided defined by two-letter or three-letter country codes.
" } }, - "documentation":"Exclusion options.
" + "documentation":"Specifies strict exclusion options for the route calculation. This setting mandates that the router will avoid any routes that include the specified options, rather than merely attempting to minimize them.
" }, "RouteMatrixHazardousCargoType":{ "type":"string", @@ -3127,7 +3138,7 @@ "documentation":"The number of occupants in the vehicle.
Default Value: 1
Travel mode options when the provided travel mode is \"Scooter\"
" + "documentation":"Travel mode options when the provided travel mode is Scooter
Travel mode options when the provided travel mode is \"Scooter\"
" + "documentation":"Travel mode options when the provided travel mode is Scooter
When travel mode is set to Scooter
, then the avoidance option ControlledAccessHighways
defaults to true
.
The number of occupants in the vehicle.
Default Value: 1
Travel mode options when the provided travel mode is \"Scooter\"
" + "documentation":"Travel mode options when the provided travel mode is Scooter
Travel mode options when the provided travel mode is \"Scooter\"
" + "documentation":"Travel mode options when the provided travel mode is Scooter
When travel mode is set to Scooter
, then the avoidance option ControlledAccessHighways
defaults to true
.
Avoid U-turns for calculation on highways and motorways.
" } }, - "documentation":"Options for WaypointOptimizationAvoidance.
" + "documentation":"Specifies options for areas to avoid. This is a best-effort avoidance setting, meaning the router will try to honor the avoidance preferences but may still include restricted areas if no feasible alternative route exists. If avoidance options are not followed, the response will indicate that the avoidance criteria were violated.
" }, "WaypointOptimizationAvoidanceOptionsAreasList":{ "type":"list", @@ -5996,6 +6007,28 @@ "max":20, "min":0 }, + "WaypointOptimizationClusteringAlgorithm":{ + "type":"string", + "enum":[ + "DrivingDistance", + "TopologySegment" + ] + }, + "WaypointOptimizationClusteringOptions":{ + "type":"structure", + "required":["Algorithm"], + "members":{ + "Algorithm":{ + "shape":"WaypointOptimizationClusteringAlgorithm", + "documentation":"The algorithm to be used. DrivingDistance
assigns all the waypoints that are within driving distance of each other into a single cluster. TopologySegment
assigns all the waypoints that are within the same topology segment into a single cluster. A Topology segment is a linear stretch of road between two junctions.
Driving distance options to be used when the clustering algorithm is DrivingDistance.
" + } + }, + "documentation":"Options for WaypointOptimizationClustering.
" + }, "WaypointOptimizationConnection":{ "type":"structure", "required":[ @@ -6097,6 +6130,17 @@ }, "documentation":"Driver related options.
" }, + "WaypointOptimizationDrivingDistanceOptions":{ + "type":"structure", + "required":["DrivingDistance"], + "members":{ + "DrivingDistance":{ + "shape":"DistanceMeters", + "documentation":"DrivingDistance assigns all the waypoints that are within driving distance of each other into a single cluster.
" + } + }, + "documentation":"Driving distance related options.
" + }, "WaypointOptimizationExclusionOptions":{ "type":"structure", "required":["Countries"], @@ -6106,7 +6150,7 @@ "documentation":"List of countries to be avoided defined by two-letter or three-letter country codes.
" } }, - "documentation":"Exclusion options.
" + "documentation":"Specifies strict exclusion options for the route calculation. This setting mandates that the router will avoid any routes that include the specified options, rather than merely attempting to minimize them.
" }, "WaypointOptimizationFailedConstraint":{ "type":"structure", @@ -6185,6 +6229,10 @@ "shape":"TimestampWithTimezoneOffset", "documentation":"Estimated time of arrival at the destination.
Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm
Examples:
2020-04-22T17:57:24Z
2020-04-22T17:57:24+02:00
Index of the cluster the waypoint is associated with. The index is included in the response only if clustering was performed while processing the request.
" + }, "DepartureTime":{ "shape":"TimestampWithTimezoneOffset", "documentation":"Estimated time of departure from thr origin.
Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm
Examples:
2020-04-22T17:57:24Z
2020-04-22T17:57:24+02:00
The Origin Id.
" } }, - "documentation":"Options related to the origin.
" + "documentation":"Origin related options.
" }, "WaypointOptimizationPedestrianOptions":{ "type":"structure", diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index eada834ebc..1c3dbb89ec 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -4342,7 +4342,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"The JobRun
timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
The JobRun
timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status. This overrides the timeout value set in the parent job.
Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.
When the value is left blank, the timeout is defaulted to 2880 minutes.
Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.
For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.
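[Editor's illustrative note] A minimal sketch of the run-level timeout override described above, using boto3's start_job_run. The job name is a placeholder; 2880 minutes matches the documented default, and values of 7 days (10080 minutes) or more are rejected.

```python
# Minimal sketch: override the parent job's timeout for a single run.
import boto3

glue = boto3.client("glue")

run = glue.start_job_run(
    JobName="my-etl-job",   # hypothetical job name
    Timeout=2880,           # minutes; overrides the timeout set on the job
)
print(run["JobRunId"])
```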
" }, "SecurityConfiguration":{ "shape":"NameString", @@ -9156,7 +9156,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status. The default is 2,880 minutes (48 hours) for batch jobs.
Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days.
" + "documentation":"The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status.
Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.
When the value is left blank, the timeout is defaulted to 2880 minutes.
Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.
For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.
" }, "MaxCapacity":{ "shape":"NullableDouble", @@ -9588,7 +9588,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes), the maximum session lifetime for this job type. Consult the documentation for other job types.
" + "documentation":"The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes). Consult the documentation for other job types.
" }, "IdleTimeout":{ "shape":"Timeout", @@ -17256,7 +17256,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status. The default is 2,880 minutes (48 hours) for batch jobs.
Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days.
" + "documentation":"The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status.
Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.
When the value is left blank, the timeout is defaulted to 2880 minutes.
Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.
For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.
" }, "MaxCapacity":{ "shape":"NullableDouble", @@ -17480,7 +17480,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"The JobRun
timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.
When the value is left blank, the timeout is defaulted to 2880 minutes.
Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.
" + "documentation":"The JobRun
timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.
When the value is left blank, the timeout is defaulted to 2880 minutes.
Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.
For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.
" }, "MaxCapacity":{ "shape":"NullableDouble", @@ -17607,7 +17607,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status. The default is 2,880 minutes (48 hours) for batch jobs.
Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days.
" + "documentation":"The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status.
Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.
When the value is left blank, the timeout is defaulted to 2880 minutes.
Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.
For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.
" }, "MaxCapacity":{ "shape":"NullableDouble", @@ -23718,7 +23718,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"The JobRun
timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.
When the value is left blank, the timeout is defaulted to 2880 minutes.
Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.
" + "documentation":"The JobRun
timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.
When the value is left blank, the timeout is defaulted to 2880 minutes.
Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.
For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.
" }, "MaxCapacity":{ "shape":"NullableDouble", diff --git a/botocore/data/healthlake/2017-07-01/service-2.json b/botocore/data/healthlake/2017-07-01/service-2.json index 8980124fef..7a719fa918 100644 --- a/botocore/data/healthlake/2017-07-01/service-2.json +++ b/botocore/data/healthlake/2017-07-01/service-2.json @@ -5,13 +5,15 @@ "endpointPrefix":"healthlake", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"HealthLake", "serviceFullName":"Amazon HealthLake", "serviceId":"HealthLake", "signatureVersion":"v4", "signingName":"healthlake", "targetPrefix":"HealthLake", - "uid":"healthlake-2017-07-01" + "uid":"healthlake-2017-07-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateFHIRDatastore":{ @@ -241,6 +243,7 @@ "type":"string", "enum":[ "SMART_ON_FHIR_V1", + "SMART_ON_FHIR", "AWS_AUTH" ] }, @@ -807,6 +810,7 @@ "type":"string", "enum":[ "SUBMITTED", + "QUEUED", "IN_PROGRESS", "COMPLETED_WITH_ERRORS", "COMPLETED", @@ -1073,8 +1077,7 @@ "required":[ "OutputDataConfig", "DatastoreId", - "DataAccessRoleArn", - "ClientToken" + "DataAccessRoleArn" ], "members":{ "JobName":{ @@ -1127,8 +1130,7 @@ "InputDataConfig", "JobOutputDataConfig", "DatastoreId", - "DataAccessRoleArn", - "ClientToken" + "DataAccessRoleArn" ], "members":{ "JobName":{ diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index 4162a60d22..1d237434e8 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -17582,7 +17582,7 @@ }, "ParameterValue":{ "type":"string", - "max":512, + "max":30720, "min":1, "pattern":"[^\\p{C}]+" }, diff --git a/botocore/data/iotsitewise/2019-12-02/service-2.json b/botocore/data/iotsitewise/2019-12-02/service-2.json index 721158573e..2d97cd3c17 100644 --- a/botocore/data/iotsitewise/2019-12-02/service-2.json +++ b/botocore/data/iotsitewise/2019-12-02/service-2.json @@ -3583,6 +3583,10 @@ "type":"structure", "required":["entries"], "members":{ + "enablePartialEntryProcessing":{ + "shape":"BooleanValue", + "documentation":"This setting enables partial ingestion at entry-level. If set to true
, we ingest all TQVs not resulting in an error. If set to false
, an invalid TQV fails ingestion of the entire entry that contains it.
The list of asset property value entries for the batch put request. You can specify up to 10 entries per request.
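[Editor's illustrative note] A hedged sketch of the new partial-entry ingestion flag in a BatchPutAssetPropertyValue call. The entry layout follows the existing API shape; asset and property IDs are placeholders.

```python
# Hedged sketch: with enablePartialEntryProcessing=True, one bad TQV no longer
# fails the whole entry that contains it.
import time
import boto3

sitewise = boto3.client("iotsitewise")

response = sitewise.batch_put_asset_property_value(
    enablePartialEntryProcessing=True,
    entries=[
        {
            "entryId": "entry-1",
            "assetId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",      # placeholder
            "propertyId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE22222",   # placeholder
            "propertyValues": [
                {
                    "value": {"doubleValue": 21.5},
                    "timestamp": {"timeInSeconds": int(time.time())},
                    "quality": "GOOD",
                }
            ],
        }
    ],
)
print(response["errorEntries"])
```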
" @@ -3599,6 +3603,7 @@ } } }, + "BooleanValue":{"type":"boolean"}, "Bucket":{ "type":"string", "max":63, @@ -6023,6 +6028,10 @@ "warmTierRetentionPeriod":{ "shape":"WarmTierRetentionPeriod", "documentation":"Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.
" + }, + "disallowIngestNullNaN":{ + "shape":"DisallowIngestNullNaN", + "documentation":"Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true
.
The type of null asset property data.
" + } + }, + "documentation":"The value type of null asset property data with BAD and UNCERTAIN qualities.
" + }, "PropertyValueStringValue":{"type":"string"}, "PutAssetPropertyValueEntries":{ "type":"list", @@ -8652,6 +8670,10 @@ "warmTierRetentionPeriod":{ "shape":"WarmTierRetentionPeriod", "documentation":"Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.
" + }, + "disallowIngestNullNaN":{ + "shape":"DisallowIngestNullNaN", + "documentation":"Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true
.
Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.
" + }, + "disallowIngestNullNaN":{ + "shape":"DisallowIngestNullNaN", + "documentation":"Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true
.
Asset property data of type string (sequence of characters).
" + "documentation":"Asset property data of type string (sequence of characters). The allowed pattern: \"^$|[^\\u0000-\\u001F\\u007F]+\". The max length is 1024.
" }, "integerValue":{ "shape":"PropertyValueIntegerValue", @@ -9781,11 +9817,15 @@ }, "doubleValue":{ "shape":"PropertyValueDoubleValue", - "documentation":"Asset property data of type double (floating point number).
" + "documentation":"Asset property data of type double (floating point number). The min value is -10^10. The max value is 10^10. Double.NaN is allowed.
" }, "booleanValue":{ "shape":"PropertyValueBooleanValue", "documentation":"Asset property data of type Boolean (true or false).
" + }, + "nullValue":{ + "shape":"PropertyValueNullValue", + "documentation":"The type of null asset property data with BAD and UNCERTAIN qualities.
" } }, "documentation":"Contains an asset property value (of a single type only).
" diff --git a/botocore/data/kafkaconnect/2021-09-14/paginators-1.json b/botocore/data/kafkaconnect/2021-09-14/paginators-1.json index 489a00d61f..d84b26fca9 100644 --- a/botocore/data/kafkaconnect/2021-09-14/paginators-1.json +++ b/botocore/data/kafkaconnect/2021-09-14/paginators-1.json @@ -17,6 +17,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "workerConfigurations" + }, + "ListConnectorOperations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "connectorOperations" } } } diff --git a/botocore/data/kafkaconnect/2021-09-14/service-2.json b/botocore/data/kafkaconnect/2021-09-14/service-2.json index 8cf60705cd..4cb6bb79e6 100644 --- a/botocore/data/kafkaconnect/2021-09-14/service-2.json +++ b/botocore/data/kafkaconnect/2021-09-14/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2021-09-14", + "auth":["aws.auth#sigv4"], "endpointPrefix":"kafkaconnect", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"Kafka Connect", "serviceFullName":"Managed Streaming for Kafka Connect", "serviceId":"KafkaConnect", @@ -159,6 +160,26 @@ ], "documentation":"Returns summary information about the connector.
" }, + "DescribeConnectorOperation":{ + "name":"DescribeConnectorOperation", + "http":{ + "method":"GET", + "requestUri":"/v1/connectorOperations/{connectorOperationArn}", + "responseCode":200 + }, + "input":{"shape":"DescribeConnectorOperationRequest"}, + "output":{"shape":"DescribeConnectorOperationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"Returns information about the specified connector's operations.
" + }, "DescribeCustomPlugin":{ "name":"DescribeCustomPlugin", "http":{ @@ -199,6 +220,26 @@ ], "documentation":"Returns information about a worker configuration.
" }, + "ListConnectorOperations":{ + "name":"ListConnectorOperations", + "http":{ + "method":"GET", + "requestUri":"/v1/connectors/{connectorArn}/operations", + "responseCode":200 + }, + "input":{"shape":"ListConnectorOperationsRequest"}, + "output":{"shape":"ListConnectorOperationsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"Lists information about a connector's operation(s).
" + }, "ListConnectors":{ "name":"ListConnectors", "http":{ @@ -386,7 +427,7 @@ ], "members":{ "maxWorkerCount":{ - "shape":"__integerMin1Max10", + "shape":"__integer", "documentation":"The maximum number of workers allocated to the connector.
" }, "mcuCount":{ @@ -394,7 +435,7 @@ "documentation":"The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
" }, "minWorkerCount":{ - "shape":"__integerMin1Max10", + "shape":"__integer", "documentation":"The minimum number of workers allocated to the connector.
" }, "scaleInPolicy":{ @@ -445,7 +486,7 @@ ], "members":{ "maxWorkerCount":{ - "shape":"__integerMin1Max10", + "shape":"__integer", "documentation":"The target maximum number of workers allocated to the connector.
" }, "mcuCount":{ @@ -453,7 +494,7 @@ "documentation":"The target number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
" }, "minWorkerCount":{ - "shape":"__integerMin1Max10", + "shape":"__integer", "documentation":"The target minimum number of workers allocated to the connector.
" }, "scaleInPolicy":{ @@ -562,6 +603,99 @@ }, "exception":true }, + "ConnectorConfiguration":{ + "type":"map", + "key":{"shape":"__string"}, + "value":{"shape":"__string"}, + "sensitive":true + }, + "ConnectorConfigurationUpdate":{ + "type":"map", + "key":{"shape":"__string"}, + "value":{"shape":"__string"}, + "sensitive":true + }, + "ConnectorOperationState":{ + "type":"string", + "enum":[ + "PENDING", + "UPDATE_IN_PROGRESS", + "UPDATE_COMPLETE", + "UPDATE_FAILED", + "ROLLBACK_IN_PROGRESS", + "ROLLBACK_FAILED", + "ROLLBACK_COMPLETE" + ] + }, + "ConnectorOperationStep":{ + "type":"structure", + "members":{ + "stepType":{ + "shape":"ConnectorOperationStepType", + "documentation":"The step type of the operation.
" + }, + "stepState":{ + "shape":"ConnectorOperationStepState", + "documentation":"The step state of the operation.
" + } + }, + "documentation":"Details of a step that is involved in a connector's operation.
" + }, + "ConnectorOperationStepState":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "COMPLETED", + "FAILED", + "CANCELLED" + ] + }, + "ConnectorOperationStepType":{ + "type":"string", + "enum":[ + "INITIALIZE_UPDATE", + "FINALIZE_UPDATE", + "UPDATE_WORKER_SETTING", + "UPDATE_CONNECTOR_CONFIGURATION", + "VALIDATE_UPDATE" + ] + }, + "ConnectorOperationSummary":{ + "type":"structure", + "members":{ + "connectorOperationArn":{ + "shape":"__string", + "documentation":"The Amazon Resource Name (ARN) of the connector operation.
" + }, + "connectorOperationType":{ + "shape":"ConnectorOperationType", + "documentation":"The type of connector operation performed.
" + }, + "connectorOperationState":{ + "shape":"ConnectorOperationState", + "documentation":"The state of the connector operation.
" + }, + "creationTime":{ + "shape":"__timestampIso8601", + "documentation":"The time when operation was created.
" + }, + "endTime":{ + "shape":"__timestampIso8601", + "documentation":"The time when operation ended.
" + } + }, + "documentation":"Summary of a connector operation.
" + }, + "ConnectorOperationType":{ + "type":"string", + "enum":[ + "UPDATE_WORKER_SETTING", + "UPDATE_CONNECTOR_CONFIGURATION", + "ISOLATE_CONNECTOR", + "RESTORE_CONNECTOR" + ] + }, "ConnectorState":{ "type":"string", "enum":[ @@ -657,7 +791,7 @@ "documentation":"Information about the capacity allocated to the connector. Exactly one of the two properties must be specified.
" }, "connectorConfiguration":{ - "shape":"__sensitive__mapOf__string", + "shape":"ConnectorConfiguration", "documentation":"A map of keys to values that represent the configuration for the connector.
" }, "connectorDescription":{ @@ -696,13 +830,13 @@ "shape":"__string", "documentation":"The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
" }, - "tags":{ - "shape":"Tags", - "documentation":"The tags you want to attach to the connector.
" - }, "workerConfiguration":{ "shape":"WorkerConfiguration", "documentation":"Specifies which worker configuration to use with the connector.
" + }, + "tags":{ + "shape":"Tags", + "documentation":"The tags you want to attach to the connector.
" } } }, @@ -1050,6 +1184,68 @@ } } }, + "DescribeConnectorOperationRequest":{ + "type":"structure", + "required":["connectorOperationArn"], + "members":{ + "connectorOperationArn":{ + "shape":"__string", + "documentation":"ARN of the connector operation to be described.
", + "location":"uri", + "locationName":"connectorOperationArn" + } + } + }, + "DescribeConnectorOperationResponse":{ + "type":"structure", + "members":{ + "connectorArn":{ + "shape":"__string", + "documentation":"The Amazon Resource Name (ARN) of the connector.
" + }, + "connectorOperationArn":{ + "shape":"__string", + "documentation":"The Amazon Resource Name (ARN) of the connector operation.
" + }, + "connectorOperationState":{ + "shape":"ConnectorOperationState", + "documentation":"The state of the connector operation.
" + }, + "connectorOperationType":{ + "shape":"ConnectorOperationType", + "documentation":"The type of connector operation performed.
" + }, + "operationSteps":{ + "shape":"__listOfConnectorOperationStep", + "documentation":"The array of operation steps taken.
" + }, + "originWorkerSetting":{ + "shape":"WorkerSetting", + "documentation":"The origin worker setting.
" + }, + "originConnectorConfiguration":{ + "shape":"ConnectorConfiguration", + "documentation":"The origin connector configuration.
" + }, + "targetWorkerSetting":{ + "shape":"WorkerSetting", + "documentation":"The target worker setting.
" + }, + "targetConnectorConfiguration":{ + "shape":"ConnectorConfiguration", + "documentation":"The target connector configuration.
" + }, + "errorInfo":{"shape":"StateDescription"}, + "creationTime":{ + "shape":"__timestampIso8601", + "documentation":"The time when the operation was created.
" + }, + "endTime":{ + "shape":"__timestampIso8601", + "documentation":"The time when the operation ended.
" + } + } + }, "DescribeConnectorRequest":{ "type":"structure", "required":["connectorArn"], @@ -1074,7 +1270,7 @@ "documentation":"The Amazon Resource Name (ARN) of the connector.
" }, "connectorConfiguration":{ - "shape":"__sensitive__mapOf__string", + "shape":"ConnectorConfiguration", "documentation":"A map of keys to values that represent the configuration for the connector.
" }, "connectorDescription":{ @@ -1125,13 +1321,13 @@ "shape":"__string", "documentation":"The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.
" }, - "stateDescription":{ - "shape":"StateDescription", - "documentation":"Details about the state of a connector.
" - }, "workerConfiguration":{ "shape":"WorkerConfigurationDescription", "documentation":"Specifies which worker configuration was used for the connector.
" + }, + "stateDescription":{ + "shape":"StateDescription", + "documentation":"Details about the state of a connector.
" } } }, @@ -1349,6 +1545,43 @@ "TLS" ] }, + "ListConnectorOperationsRequest":{ + "type":"structure", + "required":["connectorArn"], + "members":{ + "connectorArn":{ + "shape":"__string", + "documentation":"The Amazon Resource Name (ARN) of the connector for which to list operations.
", + "location":"uri", + "locationName":"connectorArn" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"Maximum number of connector operations to fetch in one get request.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"__string", + "documentation":"If the response is truncated, it includes a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListConnectorOperationsResponse":{ + "type":"structure", + "members":{ + "connectorOperations":{ + "shape":"__listOfConnectorOperationSummary", + "documentation":"An array of connector operation descriptions.
" + }, + "nextToken":{ + "shape":"__string", + "documentation":"If the response is truncated, it includes a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.
" + } + } + }, "ListConnectorsRequest":{ "type":"structure", "members":{ @@ -1394,17 +1627,17 @@ "location":"querystring", "locationName":"maxResults" }, - "namePrefix":{ - "shape":"__string", - "documentation":"Lists custom plugin names that start with the specified text string.
", - "location":"querystring", - "locationName":"namePrefix" - }, "nextToken":{ "shape":"__string", "documentation":"If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
", "location":"querystring", "locationName":"nextToken" + }, + "namePrefix":{ + "shape":"__string", + "documentation":"Lists custom plugin names that start with the specified text string.
", + "location":"querystring", + "locationName":"namePrefix" } } }, @@ -1451,17 +1684,17 @@ "location":"querystring", "locationName":"maxResults" }, - "namePrefix":{ - "shape":"__string", - "documentation":"Lists worker configuration names that start with the specified text string.
", - "location":"querystring", - "locationName":"namePrefix" - }, "nextToken":{ "shape":"__string", "documentation":"If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
", "location":"querystring", "locationName":"nextToken" + }, + "namePrefix":{ + "shape":"__string", + "documentation":"Lists worker configuration names that start with the specified text string.
", + "location":"querystring", + "locationName":"namePrefix" } } }, @@ -1501,6 +1734,7 @@ }, "MaxResults":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -1549,7 +1783,7 @@ "documentation":"The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
" }, "workerCount":{ - "shape":"__integerMin1Max10", + "shape":"__integer", "documentation":"The number of workers that are allocated to the connector.
" } }, @@ -1581,7 +1815,7 @@ "documentation":"The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
" }, "workerCount":{ - "shape":"__integerMin1Max10", + "shape":"__integer", "documentation":"The number of workers that are allocated to the connector.
" } }, @@ -1852,7 +2086,6 @@ "UpdateConnectorRequest":{ "type":"structure", "required":[ - "capacity", "connectorArn", "currentVersion" ], @@ -1861,6 +2094,10 @@ "shape":"CapacityUpdate", "documentation":"The target capacity.
" }, + "connectorConfiguration":{ + "shape":"ConnectorConfigurationUpdate", + "documentation":"A map of keys to values that represent the configuration for the connector.
" + }, "connectorArn":{ "shape":"__string", "documentation":"The Amazon Resource Name (ARN) of the connector that you want to update.
", @@ -1885,6 +2122,10 @@ "connectorState":{ "shape":"ConnectorState", "documentation":"The state of the connector.
" + }, + "connectorOperationArn":{ + "shape":"__string", + "documentation":"The Amazon Resource Name (ARN) of the connector operation.
" } } }, @@ -2062,13 +2303,15 @@ }, "documentation":"Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
" }, + "WorkerSetting":{ + "type":"structure", + "members":{ + "capacity":{"shape":"CapacityDescription"} + }, + "documentation":"Details about worker setting of a connector
" + }, "__boolean":{"type":"boolean"}, "__integer":{"type":"integer"}, - "__integerMin1Max10":{ - "type":"integer", - "max":10, - "min":1 - }, "__integerMin1Max100":{ "type":"integer", "max":100, @@ -2079,6 +2322,14 @@ "max":8, "min":1 }, + "__listOfConnectorOperationStep":{ + "type":"list", + "member":{"shape":"ConnectorOperationStep"} + }, + "__listOfConnectorOperationSummary":{ + "type":"list", + "member":{"shape":"ConnectorOperationSummary"} + }, "__listOfConnectorSummary":{ "type":"list", "member":{"shape":"ConnectorSummary"} @@ -2113,12 +2364,6 @@ "type":"string", "sensitive":true }, - "__sensitive__mapOf__string":{ - "type":"map", - "key":{"shape":"__string"}, - "value":{"shape":"__string"}, - "sensitive":true - }, "__string":{"type":"string"}, "__stringMax1024":{ "type":"string", diff --git a/botocore/data/kafkaconnect/2021-09-14/waiters-2.json b/botocore/data/kafkaconnect/2021-09-14/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/kafkaconnect/2021-09-14/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index b31f879823..0c2b284698 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -27,7 +27,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.
When you use AssociateKmsKey
, you specify either the logGroupName
parameter or the resourceIdentifier
parameter. You can't specify both of those parameters in the same operation.
Specify the logGroupName
parameter to cause all log events stored in the log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key.
Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey
operation with the resourceIdentifier
parameter that specifies a query-result
resource.
Specify the resourceIdentifier
parameter with a query-result
resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text.
Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method.
If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used.
If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.
CloudWatch Logs supports only symmetric KMS keys. Do not use an associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys.
It can take up to 5 minutes for this operation to take effect.
If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException
error.
Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.
When you use AssociateKmsKey
, you specify either the logGroupName
parameter or the resourceIdentifier
parameter. You can't specify both of those parameters in the same operation.
Specify the logGroupName
parameter to cause log events ingested into that log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key.
Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey
operation with the resourceIdentifier
parameter that specifies a query-result
resource.
Specify the resourceIdentifier
parameter with a query-result
resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text.
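A short sketch of the two mutually exclusive forms of this call from Python; the key ARN, the log group name, and the query-result resource ARN format are illustrative assumptions:

    import boto3

    logs = boto3.client("logs")
    kms_key_arn = "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"  # placeholder key

    # Encrypt log events ingested into one log group.
    logs.associate_kms_key(logGroupName="/example/app", kmsKeyId=kms_key_arn)

    # Or encrypt all stored query results in the account; logGroupName and
    # resourceIdentifier cannot be combined in one request.
    logs.associate_kms_key(
        resourceIdentifier="arn:aws:logs:us-east-1:111122223333:query-result:*",  # illustrative format
        kmsKeyId=kms_key_arn,
    )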
Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method.
If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used.
If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.
CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys.
It can take up to 5 minutes for this operation to take effect.
If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException
error.
Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask
operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.
Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.
Exporting to S3 buckets that are encrypted with AES-256 is supported.
This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING
or PENDING
) export task at a time. To cancel an export task, use CancelExportTask.
You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask
operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.
Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.
Exporting to S3 buckets that are encrypted with AES-256 is supported.
This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING
or PENDING
) export task at a time. To cancel an export task, use CancelExportTask.
You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
We recommend that you don't regularly export to Amazon S3 as a way to continuously archive your logs. For that use case, we instead recommend that you use subscriptions. For more information about subscriptions, see Real-time processing of log data with subscriptions.
Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
Deletes s delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source.
" + "documentation":"Deletes a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source.
" }, "DeleteDeliveryDestination":{ "name":"DeleteDeliveryDestination", @@ -422,7 +422,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"Returns a list of all CloudWatch Logs account policies in the account.
" + "documentation":"Returns a list of all CloudWatch Logs account policies in the account.
To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are retrieving information for.
To see data protection policies, you must have the logs:GetDataProtectionPolicy
and logs:DescribeAccountPolicies
permissions.
To see subscription filter policies, you must have the logs:DescribeSubscriptionFilters
and logs:DescribeAccountPolicies
permissions.
To see transformer policies, you must have the logs:GetTransformer
and logs:DescribeAccountPolicies
permissions.
To see field index policies, you must have the logs:DescribeIndexPolicies
and logs:DescribeAccountPolicies
permissions.
Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.
You can specify the log group to search by using either logGroupIdentifier
or logGroupName
. You must include one of these two parameters, but you can't include both.
This operation has a limit of five transactions per second, after which transactions are throttled.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
" + "documentation":"Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.
You can specify the log group to search by using either logGroupIdentifier
or logGroupName
. You must include one of these two parameters, but you can't include both.
This operation has a limit of 25 transactions per second, after which transactions are throttled.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
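A small sketch of calling this from Python with the built-in paginator, passing logGroupIdentifier only (the log group ARN is a placeholder):

    import boto3

    logs = boto3.client("logs")
    paginator = logs.get_paginator("describe_log_streams")

    # Supply logGroupIdentifier or logGroupName, never both.
    pages = paginator.paginate(
        logGroupIdentifier="arn:aws:logs:us-east-1:111122223333:log-group:/example/app",  # placeholder
        orderBy="LastEventTime",
        descending=True,
    )
    for page in pages:
        for stream in page["logStreams"]:
            print(stream["logStreamName"], stream.get("lastEventTimestamp"))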
" }, "DescribeMetricFilters":{ "name":"DescribeMetricFilters", @@ -980,7 +980,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"LimitExceededException"} ], - "documentation":"Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account.
Data protection policy
A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
If you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask
permission can use a GetLogEvents or FilterLogEvents operation with the unmask
parameter set to true
to view the unmasked log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
To use the PutAccountPolicy
operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy
and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
Subscription filter policy
A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
An Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName
. To perform a PutAccountPolicy
subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
Transformer policy
Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.
You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.
A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.
Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.
You can create transformers only for the log groups in the Standard log class.
You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria
parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log
, you can't have another field index policy filtered to my-logpprod
or my-logging
.
You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer
and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.
Field index policy
You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs
To find the fields that are in your log group events, use the GetLogGroupFields operation.
For example, suppose you have created a field index for requestId
. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value
or requestId in [value, value, ...]
will attempt to process only the log events where the indexed field matches the specified value.
Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId
won't match a log event containing requestId
.
You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria
parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log
, you can't have another field index policy filtered to my-logpprod
or my-logging
.
If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.
If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy
. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.
Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account.
To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are creating.
To create a data protection policy, you must have the logs:PutDataProtectionPolicy
and logs:PutAccountPolicy
permissions.
To create a subscription filter policy, you must have the logs:PutSubscriptionFilter
and logs:PutAccountPolicy
permissions.
To create a transformer policy, you must have the logs:PutTransformer
and logs:PutAccountPolicy
permissions.
To create a field index policy, you must have the logs:PutIndexPolicy
and logs:PutAccountPolicy
permissions.
Data protection policy
A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
If you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask
permission can use a GetLogEvents or FilterLogEvents operation with the unmask
parameter set to true
to view the unmasked log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
To use the PutAccountPolicy
operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy
and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
Subscription filter policy
A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
An Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName
. To perform a PutAccountPolicy
subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
Transformer policy
Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.
You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.
A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.
Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.
You can create transformers only for the log groups in the Standard log class.
You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria
parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log
, you can't have another field index policy filtered to my-logpprod
or my-logging
.
You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer
and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.
Field index policy
You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs
To find the fields that are in your log group events, use the GetLogGroupFields operation.
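A hedged sketch of creating an account-level field index policy for requestId from Python; the policy document layout and the selectionCriteria syntax shown here are assumptions and are not taken from this model:

    import boto3
    import json

    logs = boto3.client("logs")

    # Index requestId only for log groups whose names start with "my-log".
    logs.put_account_policy(
        policyName="request-id-index",
        policyType="FIELD_INDEX_POLICY",
        policyDocument=json.dumps({"Fields": ["requestId"]}),  # assumed document shape
        selectionCriteria='LogGroupNamePrefix IN ["my-log"]',  # assumed selection syntax
    )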
For example, suppose you have created a field index for requestId
. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value
or requestId in [value, value, ...]
will attempt to process only the log events where the indexed field matches the specified value.
Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId
won't match a log event containing requestId
.
You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria
parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log
, you can't have another field index policy filtered to my-logpprod
or my-logging
.
If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.
If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy
. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.
Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Use PutDeliveryDestination
to create a delivery destination, which is a logical object that represents the actual delivery destination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery
to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
" + "documentation":"Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.
To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
Use PutDeliveryDestination
to create a delivery destination in the same account of the actual delivery destination. The delivery destination that you create is a logical object that represents the actual delivery destination.
If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
Use CreateDelivery
to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.
If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
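The delivery steps above, sketched as Python calls; the SES Mail Manager resource ARN and the S3 bucket are placeholders, and the parameter shapes follow the delivery APIs as currently understood rather than anything defined in this diff:

    import boto3

    logs = boto3.client("logs")

    # 1. Register the sending resource as a delivery source (APPLICATION_LOG
    #    is the Mail Manager log type listed later in this model).
    logs.put_delivery_source(
        name="mailmanager-source",
        resourceArn="arn:aws:ses:us-east-1:111122223333:mailmanager-ingress-point/example",  # placeholder
        logType="APPLICATION_LOG",
    )

    # 2. Create the logical delivery destination in the destination account.
    destination = logs.put_delivery_destination(
        name="s3-destination",
        deliveryDestinationConfiguration={"destinationResourceArn": "arn:aws:s3:::example-log-bucket"},
    )

    # 3. Pair the source with the destination to start delivery.
    logs.create_delivery(
        deliverySourceName="mailmanager-source",
        deliveryDestinationArn=destination["deliveryDestination"]["arn"],
    )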
" }, "PutDeliveryDestinationPolicy":{ "name":"PutDeliveryDestinationPolicy", @@ -1146,7 +1146,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidOperationException"} ], - "documentation":"Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
Using regular expressions in filter patterns is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
An Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName
.
Using regular expressions to create subscription filters is supported. For these filters, there is a quotas of quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in subscription filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
To perform a PutSubscriptionFilter
operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
An Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName
.
Using regular expressions in filter patterns is supported. For these filters, there is a quotas of quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
To perform a PutSubscriptionFilter
operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. For more information about the permissions needed, see Create an IAM role to access the OpenSearch Service collection in the CloudWatch Logs User Guide.
" + "documentation":"Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. For more information about the permissions needed, see Permissions that the integration needs in the CloudWatch Logs User Guide.
" }, "dashboardViewerPrincipals":{ "shape":"DashboardViewerPrincipals", - "documentation":"Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.
In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardsAccess IAM policy. For more information, see
Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.
In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardAccess IAM policy. For more information, see IAM policies for users.
Defines the type of log that the source is sending.
For Amazon Bedrock, the valid value is APPLICATION_LOGS
.
For Amazon CodeWhisperer, the valid value is EVENT_LOGS
.
For IAM Identity Center, the valid value is ERROR_LOGS
.
For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS
, AUTHENTICATION_LOGS
, WORKMAIL_AVAILABILITY_PROVIDER_LOGS
, and WORKMAIL_MAILBOX_ACCESS_LOGS
.
Defines the type of log that the source is sending.
For Amazon Bedrock, the valid value is APPLICATION_LOGS
.
For CloudFront, the valid value is ACCESS_LOGS
.
For Amazon CodeWhisperer, the valid value is EVENT_LOGS
.
For Elemental MediaPackage, the valid values are EGRESS_ACCESS_LOGS
and INGRESS_ACCESS_LOGS
.
For Elemental MediaTailor, the valid values are AD_DECISION_SERVER_LOGS
, MANIFEST_SERVICE_LOGS
, and TRANSCODE_LOGS
.
For IAM Identity Center, the valid value is ERROR_LOGS
.
For Amazon Q, the valid value is EVENT_LOGS
.
For Amazon SES mail manager, the valid value is APPLICATION_LOG
.
For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS
, AUTHENTICATION_LOGS
, WORKMAIL_AVAILABILITY_PROVIDER_LOGS
, WORKMAIL_MAILBOX_ACCESS_LOGS
, and WORKMAIL_PERSONAL_ACCESS_TOKEN_LOGS
.
This string allows re-configuring the S3 object prefix to contain either static or variable sections. The valid variables to use in the suffix path will vary by each log source. See ConfigurationTemplate$allowedSuffixPathFields for more info on what values are supported in the suffix path for each log source.
" + "documentation":"This string allows re-configuring the S3 object prefix to contain either static or variable sections. The valid variables to use in the suffix path will vary by each log source. To find the values supported for the suffix path for each log source, use the DescribeConfigurationTemplates operation and check the allowedSuffixPathFields
field in the response.
his exception is returned if an unknown error occurs during a Live Tail session.
", + "documentation":"This exception is returned if an unknown error occurs during a Live Tail session.
", "exception":true }, "SessionTimeoutException":{ diff --git a/botocore/data/mailmanager/2023-10-17/paginators-1.json b/botocore/data/mailmanager/2023-10-17/paginators-1.json index 70309f1c2d..a87e968292 100644 --- a/botocore/data/mailmanager/2023-10-17/paginators-1.json +++ b/botocore/data/mailmanager/2023-10-17/paginators-1.json @@ -53,6 +53,24 @@ "output_token": "NextToken", "limit_key": "PageSize", "result_key": "TrafficPolicies" + }, + "ListAddressListImportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "ImportJobs" + }, + "ListAddressLists": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "AddressLists" + }, + "ListMembersOfAddressList": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "Addresses" } } } diff --git a/botocore/data/mailmanager/2023-10-17/service-2.json b/botocore/data/mailmanager/2023-10-17/service-2.json index f20070197b..ed29279730 100644 --- a/botocore/data/mailmanager/2023-10-17/service-2.json +++ b/botocore/data/mailmanager/2023-10-17/service-2.json @@ -48,6 +48,41 @@ "documentation":"Creates a subscription for an Add On representing the acceptance of its terms of use and additional pricing. The subscription can then be used to create an instance for use in rule sets or traffic policies.
", "idempotent":true }, + "CreateAddressList":{ + "name":"CreateAddressList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAddressListRequest"}, + "output":{"shape":"CreateAddressListResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Creates a new address list.
", + "idempotent":true + }, + "CreateAddressListImportJob":{ + "name":"CreateAddressListImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAddressListImportJobRequest"}, + "output":{"shape":"CreateAddressListImportJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Creates an import job for an address list.
", + "idempotent":true + }, "CreateArchive":{ "name":"CreateArchive", "http":{ @@ -160,6 +195,22 @@ "documentation":"Deletes an Add On subscription.
", "idempotent":true }, + "DeleteAddressList":{ + "name":"DeleteAddressList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAddressListRequest"}, + "output":{"shape":"DeleteAddressListResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Deletes an address list.
", + "idempotent":true + }, "DeleteArchive":{ "name":"DeleteArchive", "http":{ @@ -240,6 +291,23 @@ "documentation":"Delete a traffic policy resource.
", "idempotent":true }, + "DeregisterMemberFromAddressList":{ + "name":"DeregisterMemberFromAddressList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterMemberFromAddressListRequest"}, + "output":{"shape":"DeregisterMemberFromAddressListResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Removes a member from an address list.
", + "idempotent":true + }, "GetAddonInstance":{ "name":"GetAddonInstance", "http":{ @@ -268,6 +336,38 @@ ], "documentation":"Gets detailed information about an Add On subscription.
" }, + "GetAddressList":{ + "name":"GetAddressList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAddressListRequest"}, + "output":{"shape":"GetAddressListResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Fetch attributes of an address list.
" + }, + "GetAddressListImportJob":{ + "name":"GetAddressListImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAddressListImportJobRequest"}, + "output":{"shape":"GetAddressListImportJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Fetch attributes of an import job.
" + }, "GetArchive":{ "name":"GetArchive", "http":{ @@ -374,6 +474,22 @@ ], "documentation":"Fetch ingress endpoint resource attributes.
" }, + "GetMemberOfAddressList":{ + "name":"GetMemberOfAddressList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMemberOfAddressListRequest"}, + "output":{"shape":"GetMemberOfAddressListResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Fetch attributes of a member in an address list.
" + }, "GetRelay":{ "name":"GetRelay", "http":{ @@ -442,6 +558,37 @@ ], "documentation":"Lists all Add On subscriptions in your account.
" }, + "ListAddressListImportJobs":{ + "name":"ListAddressListImportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAddressListImportJobsRequest"}, + "output":{"shape":"ListAddressListImportJobsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Lists jobs for an address list.
" + }, + "ListAddressLists":{ + "name":"ListAddressLists", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAddressListsRequest"}, + "output":{"shape":"ListAddressListsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Lists address lists for this account.
" + }, "ListArchiveExports":{ "name":"ListArchiveExports", "http":{ @@ -502,6 +649,22 @@ ], "documentation":"List all ingress endpoint resources.
" }, + "ListMembersOfAddressList":{ + "name":"ListMembersOfAddressList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMembersOfAddressListRequest"}, + "output":{"shape":"ListMembersOfAddressListResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Lists members of an address list.
" + }, "ListRelays":{ "name":"ListRelays", "http":{ @@ -555,6 +718,43 @@ ], "documentation":"List traffic policy resources.
" }, + "RegisterMemberToAddressList":{ + "name":"RegisterMemberToAddressList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterMemberToAddressListRequest"}, + "output":{"shape":"RegisterMemberToAddressListResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Adds a member to an address list.
", + "idempotent":true + }, + "StartAddressListImportJob":{ + "name":"StartAddressListImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartAddressListImportJobRequest"}, + "output":{"shape":"StartAddressListImportJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Starts an import job for an address list.
", + "idempotent":true + }, "StartArchiveExport":{ "name":"StartArchiveExport", "http":{ @@ -590,6 +790,24 @@ ], "documentation":"Initiates a search across emails in the specified archive.
" }, + "StopAddressListImportJob":{ + "name":"StopAddressListImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopAddressListImportJobRequest"}, + "output":{"shape":"StopAddressListImportJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Stops an ongoing import job for an address list.
", + "idempotent":true + }, "StopArchiveExport":{ "name":"StopArchiveExport", "http":{ @@ -850,6 +1068,84 @@ "type":"list", "member":{"shape":"AddonSubscription"} }, + "Address":{ + "type":"string", + "max":320, + "min":3, + "sensitive":true + }, + "AddressFilter":{ + "type":"structure", + "members":{ + "AddressPrefix":{ + "shape":"AddressPrefix", + "documentation":"Filter to limit the results to addresses having the provided prefix.
" + } + }, + "documentation":"Filtering options for ListMembersOfAddressList operation.
" + }, + "AddressList":{ + "type":"structure", + "required":[ + "AddressListArn", + "AddressListId", + "AddressListName", + "CreatedTimestamp", + "LastUpdatedTimestamp" + ], + "members":{ + "AddressListArn":{ + "shape":"AddressListArn", + "documentation":"The Amazon Resource Name (ARN) of the address list.
" + }, + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The identifier of the address list.
" + }, + "AddressListName":{ + "shape":"AddressListName", + "documentation":"The user-friendly name of the address list.
" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the address list was created.
" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the address list was last updated.
" + } + }, + "documentation":"An address list contains a list of emails and domains that are used in MailManager Ingress endpoints and Rules for email management.
" + }, + "AddressListArn":{"type":"string"}, + "AddressListId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "AddressListName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9_.-]+$" + }, + "AddressLists":{ + "type":"list", + "member":{"shape":"AddressList"} + }, + "AddressPageSize":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "AddressPrefix":{ + "type":"string", + "max":320, + "min":1, + "sensitive":true + }, "Analysis":{ "type":"structure", "required":[ @@ -1145,6 +1441,79 @@ } } }, + "CreateAddressListImportJobRequest":{ + "type":"structure", + "required":[ + "AddressListId", + "ImportDataFormat", + "Name" + ], + "members":{ + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The unique identifier of the address list for importing addresses to.
" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"A unique token that Amazon SES uses to recognize subsequent retries of the same request.
", + "idempotencyToken":true + }, + "ImportDataFormat":{ + "shape":"ImportDataFormat", + "documentation":"The format of the input for an import job.
" + }, + "Name":{ + "shape":"JobName", + "documentation":"A user-friendly name for the import job.
" + } + } + }, + "CreateAddressListImportJobResponse":{ + "type":"structure", + "required":[ + "JobId", + "PreSignedUrl" + ], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"The identifier of the created import job.
" + }, + "PreSignedUrl":{ + "shape":"PreSignedUrl", + "documentation":"The pre-signed URL target for uploading the input file.
" + } + } + }, + "CreateAddressListRequest":{ + "type":"structure", + "required":["AddressListName"], + "members":{ + "AddressListName":{ + "shape":"AddressListName", + "documentation":"A user-friendly name for the address list.
" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"A unique token that Amazon SES uses to recognize subsequent retries of the same request.
", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.
" + } + } + }, + "CreateAddressListResponse":{ + "type":"structure", + "required":["AddressListId"], + "members":{ + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The identifier of the created address list.
" + } + } + }, "CreateArchiveRequest":{ "type":"structure", "required":["ArchiveName"], @@ -1391,6 +1760,21 @@ "members":{ } }, + "DeleteAddressListRequest":{ + "type":"structure", + "required":["AddressListId"], + "members":{ + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The identifier of an existing address list resource to delete.
" + } + } + }, + "DeleteAddressListResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteArchiveRequest":{ "type":"structure", "required":["ArchiveId"], @@ -1517,6 +1901,28 @@ }, "documentation":"The action to deliver incoming emails to an Amazon Q Business application for indexing.
" }, + "DeregisterMemberFromAddressListRequest":{ + "type":"structure", + "required":[ + "Address", + "AddressListId" + ], + "members":{ + "Address":{ + "shape":"Address", + "documentation":"The address to be removed from the address list.
" + }, + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The unique identifier of the address list to remove the address from.
" + } + } + }, + "DeregisterMemberFromAddressListResponse":{ + "type":"structure", + "members":{ + } + }, "Double":{ "type":"double", "box":true @@ -1645,44 +2051,158 @@ "shape":"AddonInstanceArn", "documentation":"The Amazon Resource Name (ARN) of the Add On instance.
" }, - "AddonName":{ - "shape":"AddonName", - "documentation":"The name of the Add On provider associated to the subscription of the instance.
" + "AddonName":{ + "shape":"AddonName", + "documentation":"The name of the Add On provider associated to the subscription of the instance.
" + }, + "AddonSubscriptionId":{ + "shape":"AddonSubscriptionId", + "documentation":"The subscription ID associated to the instance.
" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the Add On instance was created.
" + } + } + }, + "GetAddonSubscriptionRequest":{ + "type":"structure", + "required":["AddonSubscriptionId"], + "members":{ + "AddonSubscriptionId":{ + "shape":"AddonSubscriptionId", + "documentation":"The Add On subscription ID to retrieve information for.
" + } + } + }, + "GetAddonSubscriptionResponse":{ + "type":"structure", + "members":{ + "AddonName":{ + "shape":"AddonName", + "documentation":"The name of the Add On for the subscription.
" + }, + "AddonSubscriptionArn":{ + "shape":"AddonSubscriptionArn", + "documentation":"Amazon Resource Name (ARN) for the subscription.
" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the Add On subscription was created.
" + } + } + }, + "GetAddressListImportJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"The identifier of the import job that needs to be retrieved.
" + } + } + }, + "GetAddressListImportJobResponse":{ + "type":"structure", + "required":[ + "AddressListId", + "CreatedTimestamp", + "ImportDataFormat", + "JobId", + "Name", + "PreSignedUrl", + "Status" + ], + "members":{ + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The unique identifier of the address list the import job was created for.
" + }, + "CompletedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the import job was completed.
" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the import job was created.
" + }, + "Error":{ + "shape":"ErrorMessage", + "documentation":"The reason for failure of an import job.
" }, - "AddonSubscriptionId":{ - "shape":"AddonSubscriptionId", - "documentation":"The subscription ID associated to the instance.
" + "FailedItemsCount":{ + "shape":"JobItemsCount", + "documentation":"The number of input addresses that failed to be imported into the address list.
" }, - "CreatedTimestamp":{ + "ImportDataFormat":{ + "shape":"ImportDataFormat", + "documentation":"The format of the input for an import job.
" + }, + "ImportedItemsCount":{ + "shape":"JobItemsCount", + "documentation":"The number of input addresses successfully imported into the address list.
" + }, + "JobId":{ + "shape":"JobId", + "documentation":"The identifier of the import job.
" + }, + "Name":{ + "shape":"JobName", + "documentation":"A user-friendly name for the import job.
" + }, + "PreSignedUrl":{ + "shape":"PreSignedUrl", + "documentation":"The pre-signed URL target for uploading the input file.
" + }, + "StartTimestamp":{ "shape":"Timestamp", - "documentation":"The timestamp of when the Add On instance was created.
" + "documentation":"The timestamp of when the import job was started.
" + }, + "Status":{ + "shape":"ImportJobStatus", + "documentation":"The status of the import job.
" } } }, - "GetAddonSubscriptionRequest":{ + "GetAddressListRequest":{ "type":"structure", - "required":["AddonSubscriptionId"], + "required":["AddressListId"], "members":{ - "AddonSubscriptionId":{ - "shape":"AddonSubscriptionId", - "documentation":"The Add On subscription ID to retrieve information for.
" + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The identifier of an existing address list resource to be retrieved.
" } } }, - "GetAddonSubscriptionResponse":{ + "GetAddressListResponse":{ "type":"structure", + "required":[ + "AddressListArn", + "AddressListId", + "AddressListName", + "CreatedTimestamp", + "LastUpdatedTimestamp" + ], "members":{ - "AddonName":{ - "shape":"AddonName", - "documentation":"The name of the Add On for the subscription.
" + "AddressListArn":{ + "shape":"AddressListArn", + "documentation":"The Amazon Resource Name (ARN) of the address list resource.
" }, - "AddonSubscriptionArn":{ - "shape":"AddonSubscriptionArn", - "documentation":"Amazon Resource Name (ARN) for the subscription.
" + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The identifier of the address list resource.
" + }, + "AddressListName":{ + "shape":"AddressListName", + "documentation":"A user-friendly name for the address list resource.
" }, "CreatedTimestamp":{ "shape":"Timestamp", - "documentation":"The timestamp of when the Add On subscription was created.
" + "documentation":"The date of when then address list was created.
" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"The date of when the address list was last updated.
" } } }, @@ -1962,6 +2482,40 @@ } } }, + "GetMemberOfAddressListRequest":{ + "type":"structure", + "required":[ + "Address", + "AddressListId" + ], + "members":{ + "Address":{ + "shape":"Address", + "documentation":"The address to be retrieved from the address list.
" + }, + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The unique identifier of the address list to retrieve the address from.
" + } + } + }, + "GetMemberOfAddressListResponse":{ + "type":"structure", + "required":[ + "Address", + "CreatedTimestamp" + ], + "members":{ + "Address":{ + "shape":"Address", + "documentation":"The address retrieved from the address list.
" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the address was created.
" + } + } + }, "GetRelayRequest":{ "type":"structure", "required":["RelayId"], @@ -2136,6 +2690,111 @@ "max":128, "min":1 }, + "ImportDataFormat":{ + "type":"structure", + "required":["ImportDataType"], + "members":{ + "ImportDataType":{ + "shape":"ImportDataType", + "documentation":"The type of file that would be passed as an input for the address list import job.
" + } + }, + "documentation":"The import data format contains the specifications of the input file that would be passed to the address list import job.
" + }, + "ImportDataType":{ + "type":"string", + "enum":[ + "CSV", + "JSON" + ] + }, + "ImportJob":{ + "type":"structure", + "required":[ + "AddressListId", + "CreatedTimestamp", + "ImportDataFormat", + "JobId", + "Name", + "PreSignedUrl", + "Status" + ], + "members":{ + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The unique identifier of the address list the import job was created for.
" + }, + "CompletedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the import job was completed.
" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the import job was created.
" + }, + "Error":{ + "shape":"ErrorMessage", + "documentation":"The reason for failure of an import job.
" + }, + "FailedItemsCount":{ + "shape":"JobItemsCount", + "documentation":"The number of addresses in the input that failed to get imported into address list.
" + }, + "ImportDataFormat":{ + "shape":"ImportDataFormat", + "documentation":"The format of the input for the import job.
" + }, + "ImportedItemsCount":{ + "shape":"JobItemsCount", + "documentation":"The number of addresses in the input that were successfully imported into the address list.
" + }, + "JobId":{ + "shape":"JobId", + "documentation":"The identifier of the import job.
" + }, + "Name":{ + "shape":"JobName", + "documentation":"A user-friendly name for the import job.
" + }, + "PreSignedUrl":{ + "shape":"PreSignedUrl", + "documentation":"The pre-signed URL target for uploading the input file.
" + }, + "StartTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the import job was started.
" + }, + "Status":{ + "shape":"ImportJobStatus", + "documentation":"The status of the import job.
" + } + }, + "documentation":"Details about an import job.
" + }, + "ImportJobStatus":{ + "type":"string", + "enum":[ + "CREATED", + "PROCESSING", + "COMPLETED", + "FAILED", + "STOPPED" + ] + }, + "ImportJobs":{ + "type":"list", + "member":{"shape":"ImportJob"} + }, + "IngressAddressListArnList":{ + "type":"list", + "member":{"shape":"AddressListArn"}, + "max":1, + "min":1 + }, + "IngressAddressListEmailAttribute":{ + "type":"string", + "enum":["RECIPIENT"] + }, "IngressAnalysis":{ "type":"structure", "required":[ @@ -2185,6 +2844,10 @@ "Analysis":{ "shape":"IngressAnalysis", "documentation":"The structure type for a boolean condition stating the Add On ARN and its returned value.
" + }, + "IsInAddressList":{ + "shape":"IngressIsInAddressList", + "documentation":"The structure type for a boolean condition that provides the address lists to evaluate incoming traffic on.
" } }, "documentation":"The union type representing the allowed types of operands for a boolean condition.
", @@ -2235,6 +2898,24 @@ }, "documentation":"The union type representing the allowed types for the left hand side of an IP condition.
" }, + "IngressIsInAddressList":{ + "type":"structure", + "required":[ + "AddressLists", + "Attribute" + ], + "members":{ + "AddressLists":{ + "shape":"IngressAddressListArnList", + "documentation":"The address lists that will be used for evaluation.
" + }, + "Attribute":{ + "shape":"IngressAddressListEmailAttribute", + "documentation":"The email attribute that needs to be evaluated against the address list.
" + } + }, + "documentation":"The address lists and the address list attribute value that is evaluated in a policy statement's conditional expression to either deny or block the incoming email.
" + }, "IngressPoint":{ "type":"structure", "required":[ @@ -2468,6 +3149,22 @@ "type":"list", "member":{"shape":"Ipv4Cidr"} }, + "JobId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "JobItemsCount":{ + "type":"integer", + "box":true + }, + "JobName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9_.-]+$" + }, "KmsKeyArn":{ "type":"string", "pattern":"^arn:aws(|-cn|-us-gov):kms:[a-z0-9-]{1,20}:[0-9]{12}:(key|alias)/.+$" @@ -2530,6 +3227,65 @@ } } }, + "ListAddressListImportJobsRequest":{ + "type":"structure", + "required":["AddressListId"], + "members":{ + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The unique identifier of the address list for listing import jobs.
" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.
" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"The maximum number of import jobs that are returned per call. You can use NextToken to retrieve the next page of jobs.
" + } + } + }, + "ListAddressListImportJobsResponse":{ + "type":"structure", + "required":["ImportJobs"], + "members":{ + "ImportJobs":{ + "shape":"ImportJobs", + "documentation":"The list of import jobs.
" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.
" + } + } + }, + "ListAddressListsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.
" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"The maximum number of address list resources that are returned per call. You can use NextToken to retrieve the next page of address lists.
" + } + } + }, + "ListAddressListsResponse":{ + "type":"structure", + "required":["AddressLists"], + "members":{ + "AddressLists":{ + "shape":"AddressLists", + "documentation":"The list of address lists.
" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.
" + } + } + }, "ListArchiveExportsRequest":{ "type":"structure", "required":["ArchiveId"], @@ -2651,6 +3407,42 @@ } } }, + "ListMembersOfAddressListRequest":{ + "type":"structure", + "required":["AddressListId"], + "members":{ + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The unique identifier of the address list to list the addresses from.
" + }, + "Filter":{ + "shape":"AddressFilter", + "documentation":"Filter to be used to limit the results.
" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.
" + }, + "PageSize":{ + "shape":"AddressPageSize", + "documentation":"The maximum number of address list members that are returned per call. You can use NextToken to retrieve the next page of members.
" + } + } + }, + "ListMembersOfAddressListResponse":{ + "type":"structure", + "required":["Addresses"], + "members":{ + "Addresses":{ + "shape":"SavedAddresses", + "documentation":"The list of addresses.
" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.
" + } + } + }, "ListRelaysRequest":{ "type":"structure", "members":{ @@ -2896,6 +3688,10 @@ "type":"list", "member":{"shape":"PolicyStatement"} }, + "PreSignedUrl":{ + "type":"string", + "sensitive":true + }, "QBusinessApplicationId":{ "type":"string", "max":36, @@ -2914,6 +3710,28 @@ "max":100, "min":1 }, + "RegisterMemberToAddressListRequest":{ + "type":"structure", + "required":[ + "Address", + "AddressListId" + ], + "members":{ + "Address":{ + "shape":"Address", + "documentation":"The address to be added to the address list.
" + }, + "AddressListId":{ + "shape":"AddressListId", + "documentation":"The unique identifier of the address list where the address should be added.
" + } + } + }, + "RegisterMemberToAddressListResponse":{ + "type":"structure", + "members":{ + } + }, "Relay":{ "type":"structure", "members":{ @@ -3194,6 +4012,23 @@ "max":10, "min":1 }, + "RuleAddressListArnList":{ + "type":"list", + "member":{"shape":"AddressListArn"}, + "max":1, + "min":1 + }, + "RuleAddressListEmailAttribute":{ + "type":"string", + "enum":[ + "RECIPIENT", + "MAIL_FROM", + "SENDER", + "FROM", + "TO", + "CC" + ] + }, "RuleBooleanEmailAttribute":{ "type":"string", "enum":[ @@ -3233,6 +4068,10 @@ "Attribute":{ "shape":"RuleBooleanEmailAttribute", "documentation":"The boolean type representing the allowed attribute types for an email.
" + }, + "IsInAddressList":{ + "shape":"RuleIsInAddressList", + "documentation":"The structure representing the address lists and address list attribute that will be used in evaluation of boolean expression.
" } }, "documentation":"The union type representing the allowed types of operands for a boolean condition.
", @@ -3371,6 +4210,24 @@ "max":10, "min":1 }, + "RuleIsInAddressList":{ + "type":"structure", + "required":[ + "AddressLists", + "Attribute" + ], + "members":{ + "AddressLists":{ + "shape":"RuleAddressListArnList", + "documentation":"The address lists that will be used for evaluation.
" + }, + "Attribute":{ + "shape":"RuleAddressListEmailAttribute", + "documentation":"The email attribute that needs to be evaluated against the address list.
" + } + }, + "documentation":"The structure type for a boolean condition that provides the address lists and address list attribute to evaluate.
" + }, "RuleName":{ "type":"string", "max":32, @@ -3662,6 +4519,28 @@ "pattern":"^[a-zA-Z0-9!_.*'()/-]+$" }, "S3PresignedURL":{"type":"string"}, + "SavedAddress":{ + "type":"structure", + "required":[ + "Address", + "CreatedTimestamp" + ], + "members":{ + "Address":{ + "shape":"Address", + "documentation":"The email or domain that constitutes the address.
" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the address was added to the address list.
" + } + }, + "documentation":"An address that is a member of an address list.
" + }, + "SavedAddresses":{ + "type":"list", + "member":{"shape":"SavedAddress"} + }, "SearchId":{ "type":"string", "max":64, @@ -3761,6 +4640,21 @@ "pattern":"^[A-Za-z0-9!@#$%^&*()_+\\-=\\[\\]{}|.,?]+$", "sensitive":true }, + "StartAddressListImportJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"The identifier of the import job that needs to be started.
" + } + } + }, + "StartAddressListImportJobResponse":{ + "type":"structure", + "members":{ + } + }, "StartArchiveExportRequest":{ "type":"structure", "required":[ @@ -3853,6 +4747,21 @@ }, "documentation":"The response from initiating an archive search.
" }, + "StopAddressListImportJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"The identifier of the import job that needs to be stopped.
" + } + } + }, + "StopAddressListImportJobResponse":{ + "type":"structure", + "members":{ + } + }, "StopArchiveExportRequest":{ "type":"structure", "required":["ExportId"], @@ -3925,8 +4834,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9/_\\+=\\.:@\\-]+$", - "sensitive":true + "pattern":"^[a-zA-Z0-9/_\\+=\\.:@\\-]+$" }, "TagKeyList":{ "type":"list", @@ -3966,8 +4874,7 @@ "type":"string", "max":256, "min":0, - "pattern":"^[a-zA-Z0-9/_\\+=\\.:@\\-]*$", - "sensitive":true + "pattern":"^[a-zA-Z0-9/_\\+=\\.:@\\-]*$" }, "TaggableResourceArn":{ "type":"string", diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 518a698cc4..f41cdbbda9 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -2004,7 +2004,7 @@ "ExternalAudioFileInput": { "shape": "__stringPatternS3Https", "locationName": "externalAudioFileInput", - "documentation": "Specifies audio data from an external file source." + "documentation": "Specify the S3, HTTP, or HTTPS URL for your external audio file input." }, "HlsRenditionGroupSettings": { "shape": "HlsRenditionGroupSettings", @@ -2014,12 +2014,12 @@ "LanguageCode": { "shape": "LanguageCode", "locationName": "languageCode", - "documentation": "Selects a specific language code from within an audio source." + "documentation": "Specify the language to select from your audio input. In the MediaConvert console choose from a list of languages. In your JSON job settings choose from an ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php" }, "Offset": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "offset", - "documentation": "Specifies a time delta in milliseconds to offset the audio from the input video." + "documentation": "Specify a time delta, in milliseconds, to offset the audio from the input video.\nTo specify no offset: Keep the default value, 0.\nTo specify an offset: Enter an integer from -2147483648 to 2147483647" }, "Pids": { "shape": "__listOf__integerMin1Max2147483647", @@ -4995,6 +4995,45 @@ "NO_DISPLAY_WINDOW" ] }, + "DynamicAudioSelector": { + "type": "structure", + "members": { + "AudioDurationCorrection": { + "shape": "AudioDurationCorrection", + "locationName": "audioDurationCorrection", + "documentation": "Apply audio timing corrections to help synchronize audio and video in your output. To apply timing corrections, your input must meet the following requirements: * Container: MP4, or MOV, with an accurate time-to-sample (STTS) table. * Audio track: AAC. Choose from the following audio timing correction settings: * Disabled (Default): Apply no correction. * Auto: Recommended for most inputs. MediaConvert analyzes the audio timing in your input and determines which correction setting to use, if needed. * Track: Adjust the duration of each audio frame by a constant amount to align the audio track length with STTS duration. Track-level correction does not affect pitch, and is recommended for tonal audio content such as music. * Frame: Adjust the duration of each audio frame by a variable amount to align audio frames with STTS timestamps. No corrections are made to already-aligned frames. Frame-level correction may affect the pitch of corrected frames, and is recommended for atonal audio content such as speech or percussion. 
* Force: Apply audio duration correction, either Track or Frame depending on your input, regardless of the accuracy of your input's STTS table. Your output audio and video may not be aligned or it may contain audio artifacts." + }, + "ExternalAudioFileInput": { + "shape": "__stringPatternS3Https", + "locationName": "externalAudioFileInput", + "documentation": "Specify the S3, HTTP, or HTTPS URL for your external audio file input." + }, + "LanguageCode": { + "shape": "LanguageCode", + "locationName": "languageCode", + "documentation": "Specify the language to select from your audio input. In the MediaConvert console choose from a list of languages. In your JSON job settings choose from an ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php" + }, + "Offset": { + "shape": "__integerMinNegative2147483648Max2147483647", + "locationName": "offset", + "documentation": "Specify a time delta, in milliseconds, to offset the audio from the input video.\nTo specify no offset: Keep the default value, 0.\nTo specify an offset: Enter an integer from -2147483648 to 2147483647" + }, + "SelectorType": { + "shape": "DynamicAudioSelectorType", + "locationName": "selectorType", + "documentation": "Specify which audio tracks to dynamically select from your source. To select all audio tracks: Keep the default value, All tracks. To select all audio tracks with a specific language code: Choose Language code. When you do, you must also specify a language code under the Language code setting. If there is no matching Language code in your source, then no track will be selected." + } + }, + "documentation": "Use Dynamic audio selectors when you do not know the track layout of your source when you submit your job, but want to select multiple audio tracks. When you include an audio track in your output and specify this Dynamic audio selector as the Audio source, MediaConvert creates an output audio track for each dynamically selected track. Note that when you include a Dynamic audio selector for two or more inputs, each input must have the same number of audio tracks and audio channels." + }, + "DynamicAudioSelectorType": { + "type": "string", + "documentation": "Specify which audio tracks to dynamically select from your source. To select all audio tracks: Keep the default value, All tracks. To select all audio tracks with a specific language code: Choose Language code. When you do, you must also specify a language code under the Language code setting. If there is no matching Language code in your source, then no track will be selected.", + "enum": [ + "ALL_TRACKS", + "LANGUAGE_CODE" + ] + }, "Eac3AtmosBitstreamMode": { "type": "string", "documentation": "Specify the bitstream mode for the E-AC-3 stream that the encoder emits. For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex E).", @@ -6427,6 +6466,14 @@ "MAIN_422_10BIT_HIGH" ] }, + "H265Deblocking": { + "type": "string", + "documentation": "Use Deblocking to improve the video quality of your output by smoothing the edges of macroblock artifacts created during video compression. To reduce blocking artifacts at block boundaries, and improve overall video quality: Keep the default value, Enabled. To not apply any deblocking: Choose Disabled. 
Visible block edge artifacts might appear in the output, especially at lower bitrates.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "H265DynamicSubGop": { "type": "string", "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", @@ -6602,6 +6649,11 @@ "locationName": "codecProfile", "documentation": "Represents the Profile and Tier, per the HEVC (H.265) specification. Selections are grouped as [Profile] / [Tier], so \"Main/High\" represents Main Profile with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License." }, + "Deblocking": { + "shape": "H265Deblocking", + "locationName": "deblocking", + "documentation": "Use Deblocking to improve the video quality of your output by smoothing the edges of macroblock artifacts created during video compression. To reduce blocking artifacts at block boundaries, and improve overall video quality: Keep the default value, Enabled. To not apply any deblocking: Choose Disabled. Visible block edge artifacts might appear in the output, especially at lower bitrates." + }, "DynamicSubGop": { "shape": "H265DynamicSubGop", "locationName": "dynamicSubGop", @@ -7652,6 +7704,11 @@ "locationName": "dolbyVisionMetadataXml", "documentation": "Use this setting only when your video source has Dolby Vision studio mastering metadata that is carried in a separate XML file. Specify the Amazon S3 location for the metadata XML file. MediaConvert uses this file to provide global and frame-level metadata for Dolby Vision preprocessing. When you specify a file here and your input also has interleaved global and frame level metadata, MediaConvert ignores the interleaved metadata and uses only the the metadata from this external XML file. Note that your IAM service role must grant MediaConvert read permissions to this file. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html." }, + "DynamicAudioSelectors": { + "shape": "__mapOfDynamicAudioSelector", + "locationName": "dynamicAudioSelectors", + "documentation": "Use Dynamic audio selectors when you do not know the track layout of your source when you submit your job, but want to select multiple audio tracks. When you include an audio track in your output and specify this Dynamic audio selector as the Audio source, MediaConvert creates an output audio track for each dynamically selected track. Note that when you include a Dynamic audio selector for two or more inputs, each input must have the same number of audio tracks and audio channels." + }, "FileInput": { "shape": "__stringMax2048PatternS3Https", "locationName": "fileInput", @@ -7889,6 +7946,11 @@ "locationName": "dolbyVisionMetadataXml", "documentation": "Use this setting only when your video source has Dolby Vision studio mastering metadata that is carried in a separate XML file. Specify the Amazon S3 location for the metadata XML file. MediaConvert uses this file to provide global and frame-level metadata for Dolby Vision preprocessing. When you specify a file here and your input also has interleaved global and frame level metadata, MediaConvert ignores the interleaved metadata and uses only the the metadata from this external XML file. 
Note that your IAM service role must grant MediaConvert read permissions to this file. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html." }, + "DynamicAudioSelectors": { + "shape": "__mapOfDynamicAudioSelector", + "locationName": "dynamicAudioSelectors", + "documentation": "Use Dynamic audio selectors when you do not know the track layout of your source when you submit your job, but want to select multiple audio tracks. When you include an audio track in your output and specify this Dynamic audio selector as the Audio source, MediaConvert creates an output audio track for each dynamically selected track. Note that when you include a Dynamic audio selector for two or more inputs, each input must have the same number of audio tracks and audio channels." + }, "FilterEnable": { "shape": "InputFilterEnable", "locationName": "filterEnable", @@ -8294,7 +8356,7 @@ "FollowSource": { "shape": "__integerMin1Max150", "locationName": "followSource", - "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." + "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." }, "Inputs": { "shape": "__listOfInput", @@ -8465,7 +8527,7 @@ "FollowSource": { "shape": "__integerMin1Max150", "locationName": "followSource", - "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." + "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." 
}, "Inputs": { "shape": "__listOfInputTemplate", @@ -14644,6 +14706,15 @@ "shape": "CaptionSelector" } }, + "__mapOfDynamicAudioSelector": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "DynamicAudioSelector" + } + }, "__mapOf__string": { "type": "map", "key": { diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index 6d768f3cfd..80578f2acb 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -16858,6 +16858,16 @@ "shape": "StaticImageOutputDeactivateScheduleActionSettings", "locationName": "staticImageOutputDeactivateSettings", "documentation": "Action to deactivate a static image overlay in one or more specified outputs" + }, + "Id3SegmentTaggingSettings": { + "shape": "Id3SegmentTaggingScheduleActionSettings", + "locationName": "id3SegmentTaggingSettings", + "documentation": "Action to insert ID3 metadata in every segment, in applicable output groups" + }, + "TimedMetadataSettings": { + "shape": "TimedMetadataScheduleActionSettings", + "locationName": "timedMetadataSettings", + "documentation": "Action to insert ID3 metadata once, in applicable output groups" } }, "documentation": "Holds the settings for a single schedule action." @@ -20760,6 +20770,16 @@ "shape": "__stringMax100", "locationName": "scte35NameModifier", "documentation": "Change the modifier that MediaLive automatically adds to the Streams() name for a SCTE 35 track. The default is \"scte\", which means the default name will be Streams(scte.cmfm). Any string you enter here will replace the \"scte\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters." + }, + "Id3Behavior": { + "shape": "CmafId3Behavior", + "locationName": "id3Behavior", + "documentation": "Set to ENABLED to enable ID3 metadata insertion. To include metadata, you configure other parameters in the output group, or you add an ID3 action to the channel schedule." + }, + "Id3NameModifier": { + "shape": "__stringMax100", + "locationName": "id3NameModifier", + "documentation": "Change the modifier that MediaLive automatically adds to the Streams() name that identifies an ID3 track. The default is \"id3\", which means the default name will be Streams(id3.cmfm). Any string you enter here will replace the \"id3\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters." } }, "documentation": "Cmaf Ingest Group Settings", @@ -28319,6 +28339,44 @@ "shape": "ChannelEngineVersionResponse" }, "documentation": "Placeholder documentation for __listOfChannelEngineVersionResponse" + }, + "CmafId3Behavior": { + "type": "string", + "documentation": "Cmaf Id3 Behavior", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "Id3SegmentTaggingScheduleActionSettings": { + "type": "structure", + "members": { + "Id3": { + "shape": "__string", + "locationName": "id3", + "documentation": "Complete this parameter if you want to specify the entire ID3 metadata. Enter a base64 string that contains one or more fully formed ID3 tags, according to the ID3 specification: http://id3.org/id3v2.4.0-structure" + }, + "Tag": { + "shape": "__string", + "locationName": "tag", + "documentation": "Complete this parameter if you want to specify only the metadata, not the entire frame. MediaLive will insert the metadata in a TXXX frame. 
Enter the value as plain text. You can include standard MediaLive variable data such as the current segment number." + } + }, + "documentation": "Settings for the action to insert ID3 metadata in every segment, in applicable output groups." + }, + "TimedMetadataScheduleActionSettings": { + "type": "structure", + "members": { + "Id3": { + "shape": "__string", + "locationName": "id3", + "documentation": "Enter a base64 string that contains one or more fully formed ID3 tags.See the ID3 specification: http://id3.org/id3v2.4.0-structure" + } + }, + "documentation": "Settings for the action to insert ID3 metadata (as a one-time action) in applicable output groups.", + "required": [ + "Id3" + ] } }, "documentation": "API for AWS Elemental MediaLive" diff --git a/botocore/data/mediatailor/2018-04-23/service-2.json b/botocore/data/mediatailor/2018-04-23/service-2.json index 4f3850ec88..0366d86d89 100644 --- a/botocore/data/mediatailor/2018-04-23/service-2.json +++ b/botocore/data/mediatailor/2018-04-23/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2018-04-23", + "auth":["aws.auth#sigv4"], "endpointPrefix":"api.mediatailor", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"MediaTailor", "serviceFullName":"AWS MediaTailor", "serviceId":"MediaTailor", @@ -33,7 +34,7 @@ }, "input":{"shape":"ConfigureLogsForPlaybackConfigurationRequest"}, "output":{"shape":"ConfigureLogsForPlaybackConfigurationResponse"}, - "documentation":"Amazon CloudWatch log settings for a playback configuration.
", + "documentation":"Defines where AWS Elemental MediaTailor sends logs for the playback configuration.
", "idempotent":true }, "CreateChannel":{ @@ -557,10 +558,6 @@ "type":"structure", "required":["OffsetMillis"], "members":{ - "AdBreakMetadata":{ - "shape":"AdBreakMetadataList", - "documentation":"Defines a list of key/value pairs that MediaTailor generates within the EXT-X-ASSET
tag for SCTE35_ENHANCED
output.
The SCTE-35 ad insertion type. Accepted value: SPLICE_INSERT
, TIME_SIGNAL
.
Defines the SCTE-35 time_signal
message inserted around the ad.
Programs on a channel's schedule can be configured with one or more ad breaks. You can attach a splice_insert
SCTE-35 message to the ad break. This message provides basic metadata about the ad break.
See section 9.7.4 of the 2022 SCTE-35 specification for more information.
" + }, + "AdBreakMetadata":{ + "shape":"AdBreakMetadataList", + "documentation":"Defines a list of key/value pairs that MediaTailor generates within the EXT-X-ASSET
tag for SCTE35_ENHANCED
output.
Ad break configuration parameters.
" @@ -604,6 +605,17 @@ }, "documentation":"A location at which a zero-duration ad marker was detected in a VOD source manifest.
" }, + "AdConditioningConfiguration":{ + "type":"structure", + "required":["StreamingMediaFileConditioning"], + "members":{ + "StreamingMediaFileConditioning":{ + "shape":"StreamingMediaFileConditioning", + "documentation":"For ads that have media files with streaming delivery, indicates what transcoding action MediaTailor it first receives these ads from the ADS. TRANSCODE
indicates that MediaTailor must transcode the ads. NONE
indicates that you have already transcoded the ads outside of MediaTailor and don't need them transcoded as part of the ad insertion workflow. For more information about ad conditioning see https://docs.aws.amazon.com/precondition-ads.html.
The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns.
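For the new AdConditioningConfiguration shape above, here is a hedged sketch of passing it on a playback configuration request. The client name, the put_playback_configuration method name, and every field other than AdConditioningConfiguration and VideoContentSourceUrl are assumptions, and the URLs are placeholders.

import boto3  # assumed SDK client; not part of this diff

mediatailor = boto3.client("mediatailor")
mediatailor.put_playback_configuration(
    Name="example-config",
    AdDecisionServerUrl="https://ads.example.com/vast",
    VideoContentSourceUrl="https://origin.example.com/content/",
    # TRANSCODE: MediaTailor transcodes streaming ad media files; NONE: ads are already conditioned.
    AdConditioningConfiguration={"StreamingMediaFileConditioning": "TRANSCODE"},
)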
" + }, "AdMarkerPassthrough":{ "type":"structure", "members":{ @@ -639,10 +651,6 @@ "shape":"__string", "documentation":"If an alert is generated for a resource, an explanation of the reason for the alert.
" }, - "Category":{ - "shape":"AlertCategory", - "documentation":"The category that MediaTailor assigns to the alert.
" - }, "LastModifiedTime":{ "shape":"__timestampUnix", "documentation":"The timestamp when the alert was last modified.
" @@ -654,6 +662,10 @@ "ResourceArn":{ "shape":"__string", "documentation":"The Amazon Resource Name (ARN) of the resource.
" + }, + "Category":{ + "shape":"AlertCategory", + "documentation":"The category that MediaTailor assigns to the alert.
" } }, "documentation":"Alert configuration parameters.
" @@ -669,30 +681,30 @@ "AlternateMedia":{ "type":"structure", "members":{ - "AdBreaks":{ - "shape":"__listOfAdBreak", - "documentation":"Ad break configuration parameters defined in AlternateMedia.
" - }, - "ClipRange":{"shape":"ClipRange"}, - "DurationMillis":{ - "shape":"__long", - "documentation":"The duration of the alternateMedia in milliseconds.
" + "SourceLocationName":{ + "shape":"__string", + "documentation":"The name of the source location for alternateMedia.
" }, "LiveSourceName":{ "shape":"__string", "documentation":"The name of the live source for alternateMedia.
" }, + "VodSourceName":{ + "shape":"__string", + "documentation":"The name of the VOD source for alternateMedia.
" + }, + "ClipRange":{"shape":"ClipRange"}, "ScheduledStartTimeMillis":{ "shape":"__long", "documentation":"The date and time that the alternateMedia is scheduled to start, in epoch milliseconds.
" }, - "SourceLocationName":{ - "shape":"__string", - "documentation":"The name of the source location for alternateMedia.
" + "AdBreaks":{ + "shape":"__listOfAdBreak", + "documentation":"Ad break configuration parameters defined in AlternateMedia.
" }, - "VodSourceName":{ - "shape":"__string", - "documentation":"The name of the VOD source for alternateMedia.
" + "DurationMillis":{ + "shape":"__long", + "documentation":"The duration of the alternateMedia in milliseconds.
" } }, "documentation":"A playlist of media (VOD and/or live) to be played instead of the default media on a particular program.
" @@ -700,13 +712,13 @@ "AudienceMedia":{ "type":"structure", "members":{ - "AlternateMedia":{ - "shape":"__listOfAlternateMedia", - "documentation":"The list of AlternateMedia defined in AudienceMedia.
" - }, "Audience":{ "shape":"__string", "documentation":"The Audience defined in AudienceMedia.
" + }, + "AlternateMedia":{ + "shape":"__listOfAlternateMedia", + "documentation":"The list of AlternateMedia defined in AudienceMedia.
" } }, "documentation":"An AudienceMedia object contains an Audience and a list of AlternateMedia.
" @@ -736,10 +748,6 @@ "AvailSuppression":{ "type":"structure", "members":{ - "FillPolicy":{ - "shape":"FillPolicy", - "documentation":"Defines the policy to apply to the avail suppression mode. BEHIND_LIVE_EDGE
will always use the full avail suppression policy. AFTER_LIVE_EDGE
mode can be used to invoke partial ad break fills when a session starts mid-break.
Sets the ad suppression mode. By default, ad suppression is off and all ad breaks are filled with ads or slate. When Mode is set to BEHIND_LIVE_EDGE
, ad suppression is active and MediaTailor won't fill ad breaks on or behind the ad suppression Value time in the manifest lookback window. When Mode is set to AFTER_LIVE_EDGE
, ad suppression is active and MediaTailor won't fill ad breaks that are within the live edge plus the avail suppression value.
A live edge offset time in HH:MM:SS. MediaTailor won't fill ad breaks on or behind this time in the manifest lookback window. If Value is set to 00:00:00, it is in sync with the live edge, and MediaTailor won't fill any ad breaks on or behind the live edge. If you set a Value time, MediaTailor won't fill any ad breaks on or behind this time in the manifest lookback window. For example, if you set 00:45:00, then MediaTailor will fill ad breaks that occur within 45 minutes behind the live edge, but won't fill ad breaks on or behind 45 minutes behind the live edge.
" + }, + "FillPolicy":{ + "shape":"FillPolicy", + "documentation":"Defines the policy to apply to the avail suppression mode. BEHIND_LIVE_EDGE
will always use the full avail suppression policy. AFTER_LIVE_EDGE
mode can be used to invoke partial ad break fills when a session starts mid-break.
The configuration for avail suppression, also known as ad suppression. For more information about ad suppression, see Ad Suppression.
" @@ -797,20 +809,16 @@ "Arn", "ChannelName", "ChannelState", - "LogConfiguration", "Outputs", "PlaybackMode", - "Tier" + "Tier", + "LogConfiguration" ], "members":{ "Arn":{ "shape":"__string", "documentation":"The ARN of the channel.
" }, - "Audiences":{ - "shape":"Audiences", - "documentation":"The list of audiences defined in channel.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel.
" @@ -831,10 +839,6 @@ "shape":"__timestampUnix", "documentation":"The timestamp of when the channel was last modified.
" }, - "LogConfiguration":{ - "shape":"LogConfigurationForChannel", - "documentation":"The log configuration.
" - }, "Outputs":{ "shape":"ResponseOutputs", "documentation":"The channel's output properties.
" @@ -851,6 +855,14 @@ "Tier":{ "shape":"__string", "documentation":"The tier for this channel. STANDARD tier channels can contain live programs.
" + }, + "LogConfiguration":{ + "shape":"LogConfigurationForChannel", + "documentation":"The log configuration.
" + }, + "Audiences":{ + "shape":"Audiences", + "documentation":"The list of audiences defined in channel.
" } }, "documentation":"The configuration parameters for a channel. For information about MediaTailor channels, see Working with channels in the MediaTailor User Guide.
" @@ -940,7 +952,7 @@ "members":{ "PercentEnabled":{ "shape":"__integer", - "documentation":"The percentage of session logs that MediaTailor sends to your Cloudwatch Logs account. For example, if your playback configuration has 1000 sessions and percentEnabled is set to 60
, MediaTailor sends logs for 600 of the sessions to CloudWatch Logs. MediaTailor decides at random which of the playback configuration sessions to send logs for. If you want to view logs for a specific session, you can use the debug log mode.
Valid values: 0
- 100
The percentage of session logs that MediaTailor sends to your CloudWatch Logs account. For example, if your playback configuration has 1000 sessions and percentEnabled is set to 60
, MediaTailor sends logs for 600 of the sessions to CloudWatch Logs. MediaTailor decides at random which of the playback configuration sessions to send logs for. If you want to view logs for a specific session, you can use the debug log mode.
Valid values: 0
- 100
The list of audiences defined in channel.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel.
", @@ -1005,6 +1013,10 @@ "TimeShiftConfiguration":{ "shape":"TimeShiftConfiguration", "documentation":"The time-shifted viewing configuration you want to associate to the channel.
" + }, + "Audiences":{ + "shape":"Audiences", + "documentation":"The list of audiences defined in channel.
" } } }, @@ -1015,10 +1027,6 @@ "shape":"__string", "documentation":"The Amazon Resource Name (ARN) to assign to the channel.
" }, - "Audiences":{ - "shape":"Audiences", - "documentation":"The list of audiences defined in channel.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name to assign to the channel.
" @@ -1059,6 +1067,10 @@ "TimeShiftConfiguration":{ "shape":"TimeShiftConfiguration", "documentation":"The time-shifted viewing configuration assigned to the channel.
" + }, + "Audiences":{ + "shape":"Audiences", + "documentation":"The list of audiences defined in channel.
" } } }, @@ -1204,10 +1216,6 @@ "shape":"__listOfAdBreak", "documentation":"The ad break configuration settings.
" }, - "AudienceMedia":{ - "shape":"__listOfAudienceMedia", - "documentation":"The list of AudienceMedia defined in program.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel for this Program.
", @@ -1235,6 +1243,10 @@ "VodSourceName":{ "shape":"__string", "documentation":"The name that's used to refer to a VOD source.
" + }, + "AudienceMedia":{ + "shape":"__listOfAudienceMedia", + "documentation":"The list of AudienceMedia defined in program.
" } } }, @@ -1249,26 +1261,14 @@ "shape":"__string", "documentation":"The ARN to assign to the program.
" }, - "AudienceMedia":{ - "shape":"__listOfAudienceMedia", - "documentation":"The list of AudienceMedia defined in program.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name to assign to the channel for this program.
" }, - "ClipRange":{ - "shape":"ClipRange", - "documentation":"The clip range configuration settings.
" - }, "CreationTime":{ "shape":"__timestampUnix", "documentation":"The time the program was created.
" }, - "DurationMillis":{ - "shape":"__long", - "documentation":"The duration of the live program in milliseconds.
" - }, "LiveSourceName":{ "shape":"__string", "documentation":"The name of the LiveSource for this Program.
" @@ -1288,6 +1288,18 @@ "VodSourceName":{ "shape":"__string", "documentation":"The name that's used to refer to a VOD source.
" + }, + "ClipRange":{ + "shape":"ClipRange", + "documentation":"The clip range configuration settings.
" + }, + "DurationMillis":{ + "shape":"__long", + "documentation":"The duration of the live program in milliseconds.
" + }, + "AudienceMedia":{ + "shape":"__listOfAudienceMedia", + "documentation":"The list of AudienceMedia defined in program.
" } } }, @@ -1690,10 +1702,6 @@ "shape":"__string", "documentation":"The ARN of the channel.
" }, - "Audiences":{ - "shape":"Audiences", - "documentation":"The list of audiences defined in channel.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel.
" @@ -1714,10 +1722,6 @@ "shape":"__timestampUnix", "documentation":"The timestamp of when the channel was last modified.
" }, - "LogConfiguration":{ - "shape":"LogConfigurationForChannel", - "documentation":"The log configuration for the channel.
" - }, "Outputs":{ "shape":"ResponseOutputs", "documentation":"The channel's output properties.
" @@ -1735,9 +1739,17 @@ "shape":"__string", "documentation":"The channel's tier.
" }, + "LogConfiguration":{ + "shape":"LogConfigurationForChannel", + "documentation":"The log configuration for the channel.
" + }, "TimeShiftConfiguration":{ "shape":"TimeShiftConfiguration", "documentation":"The time-shifted viewing configuration for the channel.
" + }, + "Audiences":{ + "shape":"Audiences", + "documentation":"The list of audiences defined in channel.
" } } }, @@ -1828,26 +1840,14 @@ "shape":"__string", "documentation":"The ARN of the program.
" }, - "AudienceMedia":{ - "shape":"__listOfAudienceMedia", - "documentation":"The list of AudienceMedia defined in program.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel that the program belongs to.
" }, - "ClipRange":{ - "shape":"ClipRange", - "documentation":"The clip range configuration settings.
" - }, "CreationTime":{ "shape":"__timestampUnix", "documentation":"The timestamp of when the program was created.
" }, - "DurationMillis":{ - "shape":"Long", - "documentation":"The duration of the live program in milliseconds.
" - }, "LiveSourceName":{ "shape":"__string", "documentation":"The name of the LiveSource for this Program.
" @@ -1867,6 +1867,18 @@ "VodSourceName":{ "shape":"__string", "documentation":"The name that's used to refer to a VOD source.
" + }, + "ClipRange":{ + "shape":"ClipRange", + "documentation":"The clip range configuration settings.
" + }, + "DurationMillis":{ + "shape":"Long", + "documentation":"The duration of the live program in milliseconds.
" + }, + "AudienceMedia":{ + "shape":"__listOfAudienceMedia", + "documentation":"The list of AudienceMedia defined in program.
" } } }, @@ -2015,12 +2027,6 @@ "type":"structure", "required":["ChannelName"], "members":{ - "Audience":{ - "shape":"__string", - "documentation":"The single audience for GetChannelScheduleRequest.
", - "location":"querystring", - "locationName":"audience" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel associated with this Channel Schedule.
", @@ -2044,6 +2050,12 @@ "documentation":"(Optional) If the playback configuration has more than MaxResults
channel schedules, use NextToken
to get the second and subsequent pages of results.
For the first GetChannelScheduleRequest
request, omit this value.
For the second and subsequent requests, get the value of NextToken
from the previous response and specify that value for NextToken
in the request.
If the previous response didn't include a NextToken
element, there are no more channel schedules to get.
The single audience for GetChannelScheduleRequest.
", + "location":"querystring", + "locationName":"audience" } } }, @@ -2093,7 +2105,7 @@ }, "ConfigurationAliases":{ "shape":"ConfigurationAliasesResponse", - "documentation":"The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.
" + "documentation":"The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.
" }, "DashConfiguration":{ "shape":"DashConfiguration", @@ -2113,7 +2125,7 @@ }, "LogConfiguration":{ "shape":"LogConfiguration", - "documentation":"The Amazon CloudWatch log settings for a playback configuration.
" + "documentation":"The configuration that defines where AWS Elemental MediaTailor sends logs for the playback configuration.
" }, "ManifestProcessingRules":{ "shape":"ManifestProcessingRules", @@ -2155,6 +2167,10 @@ "VideoContentSourceUrl":{ "shape":"__string", "documentation":"The URL prefix for the parent manifest for the stream, minus the asset ID. The maximum length is 512 characters.
" + }, + "AdConditioningConfiguration":{ + "shape":"AdConditioningConfiguration", + "documentation":"The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns.
" } } }, @@ -2221,13 +2237,13 @@ "HlsPlaylistSettings":{ "type":"structure", "members":{ - "AdMarkupType":{ - "shape":"adMarkupTypes", - "documentation":"Determines the type of SCTE 35 tags to use in ad markup. Specify DATERANGE
to use DATERANGE
tags (for live or VOD content). Specify SCTE35_ENHANCED
to use EXT-X-CUE-OUT
and EXT-X-CUE-IN
tags (for VOD content only).
The total duration (in seconds) of each manifest. Minimum value: 30
seconds. Maximum value: 3600
seconds.
Determines the type of SCTE 35 tags to use in ad markup. Specify DATERANGE
to use DATERANGE
tags (for live or VOD content). Specify SCTE35_ENHANCED
to use EXT-X-CUE-OUT
and EXT-X-CUE-IN
tags (for VOD content only).
HLS playlist configuration parameters.
" @@ -2622,10 +2638,10 @@ "members":{ "PercentEnabled":{ "shape":"__integer", - "documentation":"The percentage of session logs that MediaTailor sends to your Cloudwatch Logs account. For example, if your playback configuration has 1000 sessions and percentEnabled
is set to 60
, MediaTailor sends logs for 600 of the sessions to CloudWatch Logs. MediaTailor decides at random which of the playback configuration sessions to send logs for. If you want to view logs for a specific session, you can use the debug log mode.
Valid values: 0
- 100
The percentage of session logs that MediaTailor sends to your configured log destination. For example, if your playback configuration has 1000 sessions and percentEnabled
is set to 60
, MediaTailor sends logs for 600 of the sessions to CloudWatch Logs. MediaTailor decides at random which of the playback configuration sessions to send logs for. If you want to view logs for a specific session, you can use the debug log mode.
Valid values: 0
- 100
Returns Amazon CloudWatch log settings for a playback configuration.
" + "documentation":"Defines where AWS Elemental MediaTailor sends logs for the playback configuration.
" }, "LogConfigurationForChannel":{ "type":"structure", @@ -2712,7 +2728,7 @@ }, "ConfigurationAliases":{ "shape":"ConfigurationAliasesResponse", - "documentation":"The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.
" + "documentation":"The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.
" }, "DashConfiguration":{ "shape":"DashConfiguration", @@ -2732,7 +2748,7 @@ }, "LogConfiguration":{ "shape":"LogConfiguration", - "documentation":"The Amazon CloudWatch log settings for a playback configuration.
" + "documentation":"Defines where AWS Elemental MediaTailor sends logs for the playback configuration.
" }, "ManifestProcessingRules":{ "shape":"ManifestProcessingRules", @@ -2774,6 +2790,10 @@ "VideoContentSourceUrl":{ "shape":"__string", "documentation":"The URL prefix for the parent manifest for the stream, minus the asset ID. The maximum length is 512 characters.
" + }, + "AdConditioningConfiguration":{ + "shape":"AdConditioningConfiguration", + "documentation":"The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns.
" } }, "documentation":"A playback configuration. For information about MediaTailor configurations, see Working with configurations in AWS Elemental MediaTailor.
" @@ -2799,7 +2819,7 @@ }, "StartTime":{ "shape":"__timestampUnix", - "documentation":"The time when prefetched ads are considered for use in an ad break. If you don't specify StartTime
, the prefetched ads are available after MediaTailor retrives them from the ad decision server.
The time when prefetched ads are considered for use in an ad break. If you don't specify StartTime
, the prefetched ads are available after MediaTailor retrieves them from the ad decision server.
A complex type that contains settings that determine how and when that MediaTailor places prefetched ads into upcoming ad breaks.
" @@ -2906,7 +2926,7 @@ }, "ConfigurationAliases":{ "shape":"ConfigurationAliasesRequest", - "documentation":"The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.
" + "documentation":"The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.
" }, "DashConfiguration":{ "shape":"DashConfigurationForPut", @@ -2948,6 +2968,10 @@ "VideoContentSourceUrl":{ "shape":"__string", "documentation":"The URL prefix for the parent manifest for the stream, minus the asset ID. The maximum length is 512 characters.
" + }, + "AdConditioningConfiguration":{ + "shape":"AdConditioningConfiguration", + "documentation":"The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns.
" } } }, @@ -2972,7 +2996,7 @@ }, "ConfigurationAliases":{ "shape":"ConfigurationAliasesResponse", - "documentation":"The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.
" + "documentation":"The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.
" }, "DashConfiguration":{ "shape":"DashConfiguration", @@ -2992,7 +3016,7 @@ }, "LogConfiguration":{ "shape":"LogConfiguration", - "documentation":"The Amazon CloudWatch log settings for a playback configuration.
" + "documentation":"The configuration that defines where AWS Elemental MediaTailor sends logs for the playback configuration.
" }, "ManifestProcessingRules":{ "shape":"ManifestProcessingRules", @@ -3034,6 +3058,10 @@ "VideoContentSourceUrl":{ "shape":"__string", "documentation":"The URL prefix for the parent manifest for the stream, minus the asset ID. The maximum length is 512 characters.
" + }, + "AdConditioningConfiguration":{ + "shape":"AdConditioningConfiguration", + "documentation":"The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns.
" } } }, @@ -3136,13 +3164,13 @@ "type":"structure", "required":["Transition"], "members":{ - "ClipRange":{ - "shape":"ClipRange", - "documentation":"Program clip range configuration.
" - }, "Transition":{ "shape":"Transition", "documentation":"Program transition configurations.
" + }, + "ClipRange":{ + "shape":"ClipRange", + "documentation":"Program clip range configuration.
" } }, "documentation":"Schedule configuration parameters. A channel must be stopped before changes can be made to the schedule.
" @@ -3168,10 +3196,6 @@ "shape":"__string", "documentation":"The ARN of the program.
" }, - "Audiences":{ - "shape":"Audiences", - "documentation":"The list of audiences defined in ScheduleEntry.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel that uses this schedule.
" @@ -3199,6 +3223,10 @@ "VodSourceName":{ "shape":"__string", "documentation":"The name of the VOD source.
" + }, + "Audiences":{ + "shape":"Audiences", + "documentation":"The list of audiences defined in ScheduleEntry.
" } }, "documentation":"The properties for a schedule.
" @@ -3246,25 +3274,25 @@ "SegmentationDescriptor":{ "type":"structure", "members":{ - "SegmentNum":{ - "shape":"Integer", - "documentation":"The segment number to assign to the segmentation_descriptor.segment_num
message, as defined in section 10.3.3.1 of the 2022 SCTE-35 specification Values must be between 0 and 256, inclusive. The default value is 0.
The Event Identifier to assign to the segmentation_descriptor.segmentation_event_id
message, as defined in section 10.3.3.1 of the 2022 SCTE-35 specification. The default value is 1.
The Type Identifier to assign to the segmentation_descriptor.segmentation_type_id
message, as defined in section 10.3.3.1 of the 2022 SCTE-35 specification. Values must be between 0 and 256, inclusive. The default value is 48.
The Upid Type to assign to the segmentation_descriptor.segmentation_upid_type
message, as defined in section 10.3.3.1 of the 2022 SCTE-35 specification. Values must be between 0 and 256, inclusive. The default value is 14.
The Upid to assign to the segmentation_descriptor.segmentation_upid
message, as defined in section 10.3.3.1 of the 2022 SCTE-35 specification. The value must be a hexadecimal string containing only the characters 0 through 9 and A through F. The default value is \"\" (an empty string).
The Upid Type to assign to the segmentation_descriptor.segmentation_upid_type
message, as defined in section 10.3.3.1 of the 2022 SCTE-35 specification. Values must be between 0 and 256, inclusive. The default value is 14.
The Type Identifier to assign to the segmentation_descriptor.segmentation_type_id
message, as defined in section 10.3.3.1 of the 2022 SCTE-35 specification. Values must be between 0 and 256, inclusive. The default value is 48.
The segment number to assign to the segmentation_descriptor.segment_num
message, as defined in section 10.3.3.1 of the 2022 SCTE-35 specification. Values must be between 0 and 256, inclusive. The default value is 0.
The list of audiences defined in channel.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel.
", @@ -3538,6 +3569,10 @@ "TimeShiftConfiguration":{ "shape":"TimeShiftConfiguration", "documentation":"The time-shifted viewing configuration you want to associate to the channel.
" + }, + "Audiences":{ + "shape":"Audiences", + "documentation":"The list of audiences defined in channel.
" } } }, @@ -3548,10 +3583,6 @@ "shape":"__string", "documentation":"The Amazon Resource Name (ARN) associated with the channel.
" }, - "Audiences":{ - "shape":"Audiences", - "documentation":"The list of audiences defined in channel.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel.
" @@ -3592,6 +3623,10 @@ "TimeShiftConfiguration":{ "shape":"TimeShiftConfiguration", "documentation":"The time-shifted viewing configuration for the channel.
" + }, + "Audiences":{ + "shape":"Audiences", + "documentation":"The list of audiences defined in channel.
" } } }, @@ -3667,10 +3702,6 @@ "shape":"__listOfAdBreak", "documentation":"The ad break configuration settings.
" }, - "AudienceMedia":{ - "shape":"__listOfAudienceMedia", - "documentation":"The list of AudienceMedia defined in program.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name of the channel for this Program.
", @@ -3686,6 +3717,10 @@ "ScheduleConfiguration":{ "shape":"UpdateProgramScheduleConfiguration", "documentation":"The schedule configuration settings.
" + }, + "AudienceMedia":{ + "shape":"__listOfAudienceMedia", + "documentation":"The list of AudienceMedia defined in program.
" } } }, @@ -3700,38 +3735,18 @@ "shape":"__string", "documentation":"The ARN to assign to the program.
" }, - "AudienceMedia":{ - "shape":"__listOfAudienceMedia", - "documentation":"The list of AudienceMedia defined in program.
" - }, "ChannelName":{ "shape":"__string", "documentation":"The name to assign to the channel for this program.
" }, - "ClipRange":{ - "shape":"ClipRange", - "documentation":"The clip range configuration settings.
" - }, "CreationTime":{ "shape":"__timestampUnix", "documentation":"The time the program was created.
" }, - "DurationMillis":{ - "shape":"__long", - "documentation":"The duration of the live program in milliseconds.
" - }, - "LiveSourceName":{ - "shape":"__string", - "documentation":"The name of the LiveSource for this Program.
" - }, "ProgramName":{ "shape":"__string", "documentation":"The name to assign to this program.
" }, - "ScheduledStartTime":{ - "shape":"__timestampUnix", - "documentation":"The scheduled start time for this Program.
" - }, "SourceLocationName":{ "shape":"__string", "documentation":"The name to assign to the source location for this program.
" @@ -3739,19 +3754,39 @@ "VodSourceName":{ "shape":"__string", "documentation":"The name that's used to refer to a VOD source.
" + }, + "LiveSourceName":{ + "shape":"__string", + "documentation":"The name of the LiveSource for this Program.
" + }, + "ClipRange":{ + "shape":"ClipRange", + "documentation":"The clip range configuration settings.
" + }, + "DurationMillis":{ + "shape":"__long", + "documentation":"The duration of the live program in milliseconds.
" + }, + "ScheduledStartTime":{ + "shape":"__timestampUnix", + "documentation":"The scheduled start time for this Program.
" + }, + "AudienceMedia":{ + "shape":"__listOfAudienceMedia", + "documentation":"The list of AudienceMedia defined in program.
" } } }, "UpdateProgramScheduleConfiguration":{ "type":"structure", "members":{ - "ClipRange":{ - "shape":"ClipRange", - "documentation":"Program clip range configuration.
" - }, "Transition":{ "shape":"UpdateProgramTransition", "documentation":"Program transition configuration.
" + }, + "ClipRange":{ + "shape":"ClipRange", + "documentation":"Program clip range configuration.
" } }, "documentation":"Schedule configuration parameters.
" @@ -3759,13 +3794,13 @@ "UpdateProgramTransition":{ "type":"structure", "members":{ - "DurationMillis":{ - "shape":"__long", - "documentation":"The duration of the live program in seconds.
" - }, "ScheduledStartTimeMillis":{ "shape":"__long", "documentation":"The date and time that the program is scheduled to start, in epoch milliseconds.
" + }, + "DurationMillis":{ + "shape":"__long", + "documentation":"The duration of the live program in seconds.
" } }, "documentation":"Program transition configuration.
" diff --git a/botocore/data/notifications/2018-05-10/paginators-1.json b/botocore/data/notifications/2018-05-10/paginators-1.json index d3eca60dd4..8cd3019370 100644 --- a/botocore/data/notifications/2018-05-10/paginators-1.json +++ b/botocore/data/notifications/2018-05-10/paginators-1.json @@ -29,6 +29,30 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "notificationHubs" + }, + "ListManagedNotificationChannelAssociations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "channelAssociations" + }, + "ListManagedNotificationChildEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "managedNotificationChildEvents" + }, + "ListManagedNotificationConfigurations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "managedNotificationConfigurations" + }, + "ListManagedNotificationEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "managedNotificationEvents" } } } diff --git a/botocore/data/notifications/2018-05-10/service-2.json b/botocore/data/notifications/2018-05-10/service-2.json index 13b0c9b03c..86b5971e46 100644 --- a/botocore/data/notifications/2018-05-10/service-2.json +++ b/botocore/data/notifications/2018-05-10/service-2.json @@ -31,7 +31,49 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Associates a delivery Channel with a particular NotificationConfiguration. Supported Channels include AWS Chatbot, the AWS Console Mobile Application, and emails (notifications-contacts).
", + "documentation":"Associates a delivery Channel with a particular NotificationConfiguration
. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts).
Associates an Account Contact with a particular ManagedNotificationConfiguration
.
Associates an additional Channel with a particular ManagedNotificationConfiguration
.
Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts).
", "idempotent":true }, "CreateEventRule":{ @@ -52,7 +94,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Creates an EventRule that is associated with a specified Notification Configuration.
", + "documentation":"Creates an EventRule
that is associated with a specified NotificationConfiguration
.
Creates a new NotificationConfiguration.
", + "documentation":"Creates a new NotificationConfiguration
.
Deletes an EventRule.
", + "documentation":"Deletes an EventRule
.
Deletes a NotificationConfiguration.
", + "documentation":"Deletes a NotificationConfiguration
.
Deregisters a NotificationHub in the specified Region.
You can't deregister the last NotificationHub in the account. NotificationEvents stored in the deregistered NotificationHub are no longer be visible. Recreating a new NotificationHub in the same Region restores access to those NotificationEvents.
Deregisters a NotificationConfiguration
in the specified Region.
You can't deregister the last NotificationHub
in the account. NotificationEvents
stored in the deregistered NotificationConfiguration
are no longer visible. Recreating a new NotificationConfiguration
in the same Region restores access to those NotificationEvents
.
Disables service trust between User Notifications and Amazon Web Services Organizations.
", "idempotent":true }, "DisassociateChannel":{ @@ -151,7 +214,67 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Disassociates a Channel from a specified NotificationConfiguration. Supported Channels include AWS Chatbot, the AWS Console Mobile Application, and emails (notifications-contacts).
", + "documentation":"Disassociates a Channel from a specified NotificationConfiguration
. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts).
Disassociates an Account Contact with a particular ManagedNotificationConfiguration
.
Disassociates an additional Channel from a particular ManagedNotificationConfiguration
.
Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts).
", + "idempotent":true + }, + "EnableNotificationsAccessForOrganization":{ + "name":"EnableNotificationsAccessForOrganization", + "http":{ + "method":"POST", + "requestUri":"/organization/access", + "responseCode":200 + }, + "input":{"shape":"EnableNotificationsAccessForOrganizationRequest"}, + "output":{"shape":"EnableNotificationsAccessForOrganizationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Enables service trust between User Notifications and Amazon Web Services Organizations.
", "idempotent":true }, "GetEventRule":{ @@ -170,7 +293,61 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Returns a specified EventRule.
" + "documentation":"Returns a specified EventRule
.
Returns the child event of a specific given ManagedNotificationEvent
.
Returns a specified ManagedNotificationConfiguration
.
Returns a specified ManagedNotificationEvent
.
Returns a specified NotificationConfiguration.
" + "documentation":"Returns a specified NotificationConfiguration
.
Returns a specified NotificationEvent.
User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. GetNotificationEvent only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the AWS User Notifications User Guide.
Returns a specified NotificationEvent
.
User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. GetNotificationEvent
only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the Amazon Web Services User Notifications User Guide.
Returns the AccessStatus of Service Trust Enablement for User Notifications and Amazon Web Services Organizations.
" }, "ListChannels":{ "name":"ListChannels", @@ -224,7 +418,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Returns a list of Channels for a NotificationConfiguration.
" + "documentation":"Returns a list of Channels for a NotificationConfiguration
.
Returns a list of EventRules according to specified filters, in reverse chronological order (newest first).
" + "documentation":"Returns a list of EventRules
according to specified filters, in reverse chronological order (newest first).
Returns a list of Account contacts and Channels associated with a ManagedNotificationConfiguration
, in paginated format.
Returns a list of ManagedNotificationChildEvents
for a specified aggregate ManagedNotificationEvent
, ordered by creation time in reverse chronological order (newest first).
Returns a list of Managed Notification Configurations according to specified filters, ordered by creation time in reverse chronological order (newest first).
" + }, + "ListManagedNotificationEvents":{ + "name":"ListManagedNotificationEvents", + "http":{ + "method":"GET", + "requestUri":"/managed-notification-events", + "responseCode":200 + }, + "input":{"shape":"ListManagedNotificationEventsRequest"}, + "output":{"shape":"ListManagedNotificationEventsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Returns a list of Managed Notification Events according to specified filters, ordered by creation time in reverse chronological order (newest first).
" }, "ListNotificationConfigurations":{ "name":"ListNotificationConfigurations", @@ -259,7 +522,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"Returns a list of abbreviated NotificationConfigurations according to specified filters, in reverse chronological order (newest first).
" + "documentation":"Returns a list of abbreviated NotificationConfigurations
according to specified filters, in reverse chronological order (newest first).
Returns a list of NotificationEvents according to specified filters, in reverse chronological order (newest first).
User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. ListNotificationEvents only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the AWS User Notifications User Guide.
Returns a list of NotificationEvents
according to specified filters, in reverse chronological order (newest first).
User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. ListNotificationEvents only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the Amazon Web Services User Notifications User Guide.
Returns a list of NotificationHubs.
" + "documentation":"Returns a list of NotificationHubs
.
Returns a list of tags for a specified Amazon Resource Name (ARN).
For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide.
This is only supported for NotificationConfigurations.
Returns a list of tags for a specified Amazon Resource Name (ARN).
For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide.
This is only supported for NotificationConfigurations
.
Registers a NotificationHub in the specified Region.
There is a maximum of one NotificationHub per Region. You can have a maximum of 3 NotificationHubs at a time.
", + "documentation":"Registers a NotificationConfiguration
in the specified Region.
There is a maximum of one NotificationConfiguration
per Region. You can have a maximum of 3 NotificationHub
resources at a time.
Tags the resource with a tag key and value.
For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide.
This is only supported for NotificationConfigurations.
Tags the resource with a tag key and value.
For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide.
This is only supported for NotificationConfigurations
.
Untags a resource with a specified Amazon Resource Name (ARN).
For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide.
", + "documentation":"Untags a resource with a specified Amazon Resource Name (ARN).
For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide.
", "idempotent":true }, "UpdateEventRule":{ @@ -388,7 +651,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Updates an existing EventRule.
", + "documentation":"Updates an existing EventRule
.
Updates a NotificationConfiguration.
", + "documentation":"Updates a NotificationConfiguration
.
Properties used to summarize aggregated events.
" + } + }, + "documentation":"Provides detailed information about the dimensions used for aggregation.
" + }, "AggregationDuration":{ "type":"string", "enum":[ @@ -446,6 +740,64 @@ "NONE" ] }, + "AggregationKey":{ + "type":"structure", + "required":[ + "name", + "value" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"Indicates the type of aggregation key.
" + }, + "value":{ + "shape":"String", + "documentation":"Indicates the value associated with the aggregation key name.
" + } + }, + "documentation":"Key-value collection that indicate how notifications are grouped.
" + }, + "AggregationKeys":{ + "type":"list", + "member":{"shape":"AggregationKey"} + }, + "AggregationSummary":{ + "type":"structure", + "required":[ + "eventCount", + "aggregatedBy", + "aggregatedAccounts", + "aggregatedRegions" + ], + "members":{ + "eventCount":{ + "shape":"Integer", + "documentation":"Indicates the number of events associated with the aggregation key.
" + }, + "aggregatedBy":{ + "shape":"AggregationKeys", + "documentation":"Indicates the criteria or rules by which notifications have been grouped together.
" + }, + "aggregatedAccounts":{ + "shape":"SummarizationDimensionOverview", + "documentation":"Indicates the Amazon Web Services accounts in the aggregation key.
" + }, + "aggregatedRegions":{ + "shape":"SummarizationDimensionOverview", + "documentation":"Indicates the Amazon Web Services Regions in the aggregation key.
" + }, + "aggregatedOrganizationalUnits":{ + "shape":"SummarizationDimensionOverview", + "documentation":"Indicates the collection of organizational units that are involved in the aggregation key.
" + }, + "additionalSummarizationDimensions":{ + "shape":"SummarizationDimensionOverviews", + "documentation":"List of additional dimensions used to group and summarize data.
" + } + }, + "documentation":"Provides additional information about the aggregation key.
" + }, "Arn":{ "type":"string", "pattern":"arn:[^:]*:[^:]*:[^:]*:.*" @@ -459,13 +811,13 @@ "members":{ "arn":{ "shape":"ChannelArn", - "documentation":"The Amazon Resource Name (ARN) of the Channel to associate with the NotificationConfiguration.
Supported ARNs include AWS Chatbot, the Console Mobile Application, and notifications-contacts.
", + "documentation":"The Amazon Resource Name (ARN) of the Channel to associate with the NotificationConfiguration
.
Supported ARNs include Chatbot, the Console Mobile Application, and notifications-contacts.
", "location":"uri", "locationName":"arn" }, "notificationConfigurationArn":{ "shape":"NotificationConfigurationArn", - "documentation":"The ARN of the NotificationConfiguration to associate with the Channel.
" + "documentation":"The ARN of the NotificationConfiguration
to associate with the Channel.
A unique value of an Account Contact Type to associate with the ManagedNotificationConfiguration
.
The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration
to associate with the Account Contact.
The Amazon Resource Name (ARN) of the Channel to associate with the ManagedNotificationConfiguration
.
Supported ARNs include Chatbot, the Console Mobile Application, and email (notifications-contacts).
", + "location":"uri", + "locationName":"channelArn" + }, + "managedNotificationConfigurationArn":{ + "shape":"ManagedNotificationConfigurationOsArn", + "documentation":"The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration
to associate with the additional Channel.
The Amazon Resource Name (ARN) of the NotificationConfiguration associated with this EventRule.
" + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration
associated with this EventRule
.
The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The matched event source.
Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
" + "documentation":"The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
" }, "eventPattern":{ "shape":"EventRuleEventPattern", - "documentation":"An additional event pattern used to further filter the events this EventRule receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" + "documentation":"An additional event pattern used to further filter the events this EventRule
receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" }, "regions":{ "shape":"Regions", - "documentation":"A list of AWS Regions that send events to this EventRule.
" + "documentation":"A list of Amazon Web Services Regions that send events to this EventRule
.
The ARN of a NotificationConfiguration.
" + "documentation":"The ARN of a NotificationConfiguration
.
A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.
" + "documentation":"A list of an EventRule
's status by Region. Regions are mapped to EventRuleStatusSummary
.
The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters.
" + "documentation":"The name of the NotificationConfiguration
. Supports RFC 3986's unreserved characters.
The description of the NotificationConfiguration.
" + "documentation":"The description of the NotificationConfiguration
.
The aggregation preference of the NotificationConfiguration.
Values:
LONG
Aggregate notifications for long periods of time (12 hours).
SHORT
Aggregate notifications for short periods of time (5 minutes).
NONE
Don't aggregate notifications.
No delay in delivery.
The aggregation preference of the NotificationConfiguration
.
Values:
LONG
Aggregate notifications for long periods of time (12 hours).
SHORT
Aggregate notifications for short periods of time (5 minutes).
NONE
Don't aggregate notifications.
The Amazon Resource Name (ARN) of the the resource.
" + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration
.
The status of this NotificationConfiguration.
The status should always be INACTIVE
when part of the CreateNotificationConfiguration response.
Values:
ACTIVE
All EventRules are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules are ACTIVE
and some are INACTIVE
.
Any call can be run.
INACTIVE
All EventRules are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration is being deleted.
Only GET
and LIST
calls can be run.
The current status of this NotificationConfiguration
.
The Amazon Resource Name (ARN) of the EventRule to delete.
", + "documentation":"The Amazon Resource Name (ARN) of the EventRule
to delete.
The Amazon Resource Name (ARN) of the NotificationConfiguration to delete.
", + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration
to delete.
The NotificationHub Region.
", + "documentation":"The NotificationConfiguration
Region.
The NotificationHub Region.
" + "documentation":"The NotificationConfiguration
Region.
NotificationHub status information.
" + "documentation":" NotificationConfiguration
status information.
The ARN of the NotificationConfiguration to disassociate.
" + "documentation":"The ARN of the NotificationConfiguration
to disassociate.
The unique value of an Account Contact Type to associate with the ManagedNotificationConfiguration
.
The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration
to associate with the Account Contact.
The Amazon Resource Name (ARN) of the Channel to associate with the ManagedNotificationConfiguration
.
The Amazon Resource Name (ARN) of the Managed Notification Configuration to associate with the additional Channel.
" + } + } + }, + "DisassociateManagedNotificationAdditionalChannelResponse":{ + "type":"structure", + "members":{ + } + }, + "EnableNotificationsAccessForOrganizationRequest":{ + "type":"structure", + "members":{ + } + }, + "EnableNotificationsAccessForOrganizationResponse":{ + "type":"structure", + "members":{ + } + }, "ErrorMessage":{"type":"string"}, "EventRuleArn":{ "type":"string", @@ -746,14 +1234,14 @@ "members":{ "status":{ "shape":"EventRuleStatus", - "documentation":"The status of the EventRule.
Values:
ACTIVE
The EventRule can process events.
INACTIVE
The EventRule may be unable to process events.
CREATING
The EventRule is being created.
Only GET
and LIST
calls can be run.
UPDATING
The EventRule is being updated.
Only GET
and LIST
calls can be run.
DELETING
The EventRule is being deleted.
Only GET
and LIST
calls can be run.
The status of the EventRule
.
Values:
ACTIVE
The EventRule
can process events.
INACTIVE
The EventRule
may be unable to process events.
CREATING
The EventRule
is being created.
Only GET
and LIST
calls can be run.
UPDATING
The EventRule
is being updated.
Only GET
and LIST
calls can be run.
DELETING
The EventRule
is being deleted.
Only GET
and LIST
calls can be run.
A human-readable reason for EventRuleStatus.
" + "documentation":"A human-readable reason for EventRuleStatus
.
Describes EventRule status information.
" + "documentation":"Provides additional information about the current EventRule
status.
The Amazon Resource Name (ARN) of the resource.
" + "documentation":"The Amazon Resource Name (ARN) of the EventRule
. CloudFormation stack generates this ARN and then uses this ARN to associate with the NotificationConfiguration
.
The ARN for the NotificationConfiguration associated with this EventRule.
" + "documentation":"The ARN for the NotificationConfiguration
associated with this EventRule
.
The creation time of the resource.
" + "documentation":"The creation time of the EventRule
.
The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The event source this rule should match with the EventBridge event sources. It must match with at least one of the valid EventBridge event sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
" + "documentation":"The event type this rule should match with the EventBridge events. It must match with atleast one of the valid EventBridge event types. For example, Amazon EC2 Instance State change Notification and Amazon CloudWatch State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
" }, "eventPattern":{ "shape":"EventRuleEventPattern", - "documentation":"An additional event pattern used to further filter the events this EventRule receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" + "documentation":"An additional event pattern used to further filter the events this EventRule
receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" }, "regions":{ "shape":"Regions", - "documentation":"A list of AWS Regions that send events to this EventRule.
" + "documentation":"A list of Amazon Web Services Regions that send events to this EventRule
.
A list of Amazon EventBridge Managed Rule ARNs associated with this EventRule.
These are created by AWS User Notifications within your account so your EventRules can function.
A list of Amazon EventBridge Managed Rule ARNs associated with this EventRule
.
These are created by User Notifications within your account so your EventRules
can function.
A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.
" + "documentation":"A list of an EventRule
's status by Region. Regions are mapped to EventRuleStatusSummary
.
Contains a complete list of fields related to an EventRule.
" + "documentation":"Contains a complete list of fields related to an EventRule
.
The Amazon Resource Name (ARN) of the EventRule to return.
", + "documentation":"The Amazon Resource Name (ARN) of the EventRule
to return.
The ARN of a NotificationConfiguration.
" + "documentation":"The ARN of a NotificationConfiguration
.
The date when the EventRule was created.
" + "documentation":"The date when the EventRule
was created.
The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The matched event source.
Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
" + "documentation":"The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
" }, "eventPattern":{ "shape":"EventRuleEventPattern", - "documentation":"An additional event pattern used to further filter the events this EventRule receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" + "documentation":"An additional event pattern used to further filter the events this EventRule
receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" }, "regions":{ "shape":"Regions", - "documentation":"A list of AWS Regions that send events to this EventRule.
" + "documentation":"A list of Amazon Web Services Regions that send events to this EventRule
.
A list of managed rules from EventBridge that are are associated with this EventRule.
These are created by AWS User Notifications within your account so this EventRule functions.
A list of managed rules from EventBridge that are associated with this EventRule
.
These are created by User Notifications within your account so this EventRule
functions.
A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.
" + "documentation":"A list of an EventRule
's status by Region. Regions are mapped to EventRuleStatusSummary
.
The Amazon Resource Name (ARN) of the ManagedNotificationChildEvent
to return.
The locale code of the language used for the retrieved ManagedNotificationChildEvent
. The default locale is English en_US
.
The ARN of the resource.
" + }, + "managedNotificationConfigurationArn":{ + "shape":"ManagedNotificationConfigurationOsArn", + "documentation":"The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration
associated with the ManagedNotificationChildEvent
.
The creation time of the ManagedNotificationChildEvent
.
The content of the ManagedNotificationChildEvent
.
The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration
to return.
The ARN of the ManagedNotificationConfiguration
resource.
The name of the ManagedNotificationConfiguration
.
The description of the ManagedNotificationConfiguration
.
The category of the ManagedNotificationConfiguration
.
The subCategory of the ManagedNotificationConfiguration
.
The Amazon Resource Name (ARN) of the ManagedNotificationEvent
to return.
The locale code of the language used for the retrieved ManagedNotificationEvent
. The default locale is English (en_US)
.
The ARN of the resource.
" + }, + "managedNotificationConfigurationArn":{ + "shape":"ManagedNotificationConfigurationOsArn", + "documentation":"The ARN of the ManagedNotificationConfiguration
.
The creation time of the ManagedNotificationEvent
.
The content of the ManagedNotificationEvent
.
The Amazon Resource Name (ARN) of the NotificationConfiguration to return.
", + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration
to return.
The name of the NotificationConfiguration.
" + "documentation":"The name of the NotificationConfiguration
.
The description of the NotificationConfiguration.
" + "documentation":"The description of the NotificationConfiguration
.
The status of this NotificationConfiguration.
The status should always be INACTIVE
when part of the CreateNotificationConfiguration response.
Values:
ACTIVE
All EventRules are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules are ACTIVE
and some are INACTIVE
.
Any call can be run.
INACTIVE
All EventRules are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration is being deleted. Only GET
and LIST
calls can be run.
Only GET
and LIST
calls can be run.
The status of this NotificationConfiguration
.
The creation time of the NotificationConfiguration.
" + "documentation":"The creation time of the NotificationConfiguration
.
The aggregation preference of the NotificationConfiguration.
Values:
LONG
Aggregate notifications for long periods of time (12 hours).
SHORT
Aggregate notifications for short periods of time (5 minutes).
NONE
Don't aggregate notifications.
No delay in delivery.
The aggregation preference of the NotificationConfiguration
.
Values:
LONG
Aggregate notifications for long periods of time (12 hours).
SHORT
Aggregate notifications for short periods of time (5 minutes).
NONE
Don't aggregate notifications.
The Amazon Resource Name (ARN) of the NotificationEvent to return.
", + "documentation":"The Amazon Resource Name (ARN) of the NotificationEvent
to return.
The locale code of the language used for the retrieved NotificationEvent. The default locale is English en_US
.
The locale code of the language used for the retrieved NotificationEvent
. The default locale is English en_US
.
The ARN of the NotificationConfiguration.
" + "documentation":"The ARN of the NotificationConfiguration
.
The creation time of the NotificationEvent.
" + "documentation":"The creation time of the NotificationEvent
.
The content of the NotificationEvent.
" + "documentation":"The content of the NotificationEvent
.
The AccessStatus
of Service Trust Enablement for User Notifications to Amazon Web Services Organizations.
The Amazon Resource Name (ARN) of the NotificationConfiguration.
", + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration
.
The start token for paginated calls. Retrieved from the response of a previous ListNotificationEvents call. NextToken uses Base64 encoding.
", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListNotificationEvents call. NextToken
uses Base64 encoding.
The Amazon Resource Name (ARN) of the NotificationConfiguration.
", + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration
.
The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.
", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListEventRules
call. Next token uses Base64 encoding.
A list of EventRules.
" + "documentation":"A list of EventRules
.
The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The Amazon Resource Name (ARN) of the Channel to match.
", - "location":"querystring", - "locationName":"channelArn" - }, - "status":{ - "shape":"NotificationConfigurationStatus", - "documentation":"The NotificationConfiguration status to match.
Values:
ACTIVE
All EventRules are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
Any call can be run.
INACTIVE
All EventRules are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration is being deleted.
Only GET
and LIST
calls can be run.
The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration
to match.
The maximum number of results to be returned in this call. Defaults to 20.
", "location":"querystring", "locationName":"maxResults" }, "nextToken":{ "shape":"NextToken", - "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.
", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChannelAssociations
call.
A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" }, - "notificationConfigurations":{ - "shape":"NotificationConfigurations", - "documentation":"The NotificationConfigurations in the account.
" + "channelAssociations":{ + "shape":"ManagedNotificationChannelAssociations", + "documentation":"A list that contains the following information about a channel association.
" } } }, - "ListNotificationEventsRequest":{ + "ListManagedNotificationChildEventsRequest":{ "type":"structure", + "required":["aggregateManagedNotificationEventArn"], "members":{ + "aggregateManagedNotificationEventArn":{ + "shape":"ManagedNotificationEventArn", + "documentation":"The Amazon Resource Name (ARN) of the ManagedNotificationEvent
.
The earliest time of events to return from this call.
", @@ -1162,139 +1795,734 @@ }, "locale":{ "shape":"LocaleCode", - "documentation":"The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US)
.
The locale code of the language used for the retrieved NotificationEvent
. The default locale is English.en_US
.
The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The maximum number of results to be returned in this call. Defaults to 20.
", "location":"querystring", - "locationName":"source" + "locationName":"maxResults" }, - "includeChildEvents":{ - "shape":"Boolean", - "documentation":"Include aggregated child events in the result.
", + "relatedAccount":{ + "shape":"AccountId", + "documentation":"The Amazon Web Services account ID associated with the Managed Notification Child Events.
", "location":"querystring", - "locationName":"includeChildEvents" + "locationName":"relatedAccount" }, - "aggregateNotificationEventArn":{ - "shape":"NotificationEventArn", - "documentation":"The Amazon Resource Name (ARN) of the aggregatedNotificationEventArn to match.
", + "organizationalUnitId":{ + "shape":"OrganizationalUnitId", + "documentation":"The identifier of the Amazon Web Services Organizations organizational unit (OU) associated with the Managed Notification Child Events.
", "location":"querystring", - "locationName":"aggregateNotificationEventArn" + "locationName":"organizationalUnitId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChannelAssociations call. Next token uses Base64 encoding.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListManagedNotificationChildEventsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListManagedNotificationChildEventsResponse":{ + "type":"structure", + "required":["managedNotificationChildEvents"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + }, + "managedNotificationChildEvents":{ + "shape":"ManagedNotificationChildEvents", + "documentation":"A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + } + } + }, + "ListManagedNotificationConfigurationsRequest":{ + "type":"structure", + "members":{ + "channelIdentifier":{ + "shape":"ChannelIdentifier", + "documentation":"The identifier or ARN of the notification channel to filter configurations by.
", + "location":"querystring", + "locationName":"channelIdentifier" }, "maxResults":{ - "shape":"ListNotificationEventsRequestMaxResultsInteger", + "shape":"ListManagedNotificationConfigurationsRequestMaxResultsInteger", "documentation":"The maximum number of results to be returned in this call. Defaults to 20.
", "location":"querystring", "locationName":"maxResults" }, - "nextToken":{ - "shape":"NextToken", - "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.
", - "location":"querystring", - "locationName":"nextToken" + "nextToken":{ + "shape":"NextToken", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChannelAssociations call. Next token uses Base64 encoding.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListManagedNotificationConfigurationsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListManagedNotificationConfigurationsResponse":{ + "type":"structure", + "required":["managedNotificationConfigurations"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + }, + "managedNotificationConfigurations":{ + "shape":"ManagedNotificationConfigurations", + "documentation":"A list of Managed Notification Configurations matching the request criteria.
" + } + } + }, + "ListManagedNotificationEventsRequest":{ + "type":"structure", + "members":{ + "startTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The earliest time of events to return from this call.
", + "location":"querystring", + "locationName":"startTime" + }, + "endTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"Latest time of events to return from this call.
", + "location":"querystring", + "locationName":"endTime" + }, + "locale":{ + "shape":"LocaleCode", + "documentation":"The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US).
", + "location":"querystring", + "locationName":"locale" + }, + "source":{ + "shape":"Source", + "documentation":"The Amazon Web Services service the event originates from. For example aws.cloudwatch.
", + "location":"querystring", + "locationName":"source" + }, + "maxResults":{ + "shape":"ListManagedNotificationEventsRequestMaxResultsInteger", + "documentation":"The maximum number of results to be returned in this call. Defaults to 20.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChannelAssociations
call. Next token uses Base64 encoding.
The Organizational Unit Id that an Amazon Web Services account belongs to.
", + "location":"querystring", + "locationName":"organizationalUnitId" + }, + "relatedAccount":{ + "shape":"AccountId", + "documentation":"The Amazon Web Services account ID associated with the Managed Notification Events.
", + "location":"querystring", + "locationName":"relatedAccount" + } + } + }, + "ListManagedNotificationEventsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListManagedNotificationEventsResponse":{ + "type":"structure", + "required":["managedNotificationEvents"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + }, + "managedNotificationEvents":{ + "shape":"ManagedNotificationEvents", + "documentation":"A list of Managed Notification Events matching the request criteria.
" + } + } + }, + "ListNotificationConfigurationsRequest":{ + "type":"structure", + "members":{ + "eventRuleSource":{ + "shape":"Source", + "documentation":"The matched event source.
Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
The Amazon Resource Name (ARN) of the Channel to match.
", + "location":"querystring", + "locationName":"channelArn" + }, + "status":{ + "shape":"NotificationConfigurationStatus", + "documentation":"The NotificationConfiguration
status to match.
Values:
ACTIVE
All EventRules
are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules
are ACTIVE
and some are INACTIVE
. Any call can be run.
Any call can be run.
INACTIVE
All EventRules
are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration
is being deleted.
Only GET
and LIST
calls can be run.
The maximum number of results to be returned in this call. Defaults to 20.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListEventRules
call. Next token uses Base64 encoding.
A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + }, + "notificationConfigurations":{ + "shape":"NotificationConfigurations", + "documentation":"The NotificationConfigurations
in the account.
The earliest time of events to return from this call.
", + "location":"querystring", + "locationName":"startTime" + }, + "endTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"Latest time of events to return from this call.
", + "location":"querystring", + "locationName":"endTime" + }, + "locale":{ + "shape":"LocaleCode", + "documentation":"The locale code of the language used for the retrieved NotificationEvent
. The default locale is English (en_US)
.
The matched event source.
Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
Include aggregated child events in the result.
", + "location":"querystring", + "locationName":"includeChildEvents" + }, + "aggregateNotificationEventArn":{ + "shape":"NotificationEventArn", + "documentation":"The Amazon Resource Name (ARN) of the aggregatedNotificationEventArn
to match.
The maximum number of results to be returned in this call. Defaults to 20.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListEventRules
call. Next token uses Base64 encoding.
A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + }, + "notificationEvents":{ + "shape":"NotificationEvents", + "documentation":"The list of notification events.
" + } + } + }, + "ListNotificationHubsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListNotificationHubsRequestMaxResultsInteger", + "documentation":"The maximum number of records to list in a single response.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. Set to null to start listing notification hubs from the start.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListNotificationHubsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":3, + "min":3 + }, + "ListNotificationHubsResponse":{ + "type":"structure", + "required":["notificationHubs"], + "members":{ + "notificationHubs":{ + "shape":"NotificationHubs", + "documentation":"The NotificationHubs
in the account.
A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) to use to list tags.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"A list of tags for the specified ARN.
" + } + } + }, + "LocaleCode":{ + "type":"string", + "enum":[ + "de_DE", + "en_CA", + "en_US", + "en_UK", + "es_ES", + "fr_CA", + "fr_FR", + "id_ID", + "it_IT", + "ja_JP", + "ko_KR", + "pt_BR", + "tr_TR", + "zh_CN", + "zh_TW" + ] + }, + "ManagedNotificationChannelAssociationSummary":{ + "type":"structure", + "required":[ + "channelIdentifier", + "channelType" + ], + "members":{ + "channelIdentifier":{ + "shape":"String", + "documentation":"The unique identifier for the notification channel.
" + }, + "channelType":{ + "shape":"ChannelType", + "documentation":"The type of notification channel used for message delivery.
Values:
ACCOUNT_CONTACT
Delivers notifications to Account Managed contacts through the User Notification Service.
MOBILE
Delivers notifications through the Amazon Web Services Console Mobile Application to mobile devices.
CHATBOT
Delivers notifications through Chatbot to collaboration platforms (Slack, Chime).
EMAIL
Delivers notifications to email addresses.
Controls whether users can modify channel associations for a notification configuration.
Values:
ENABLED
Users can associate or disassociate channels with the notification configuration.
DISABLED
Users cannot associate or disassociate channels with the notification configuration.
Provides a summary of channel associations for a managed notification configuration.
" + }, + "ManagedNotificationChannelAssociations":{ + "type":"list", + "member":{"shape":"ManagedNotificationChannelAssociationSummary"} + }, + "ManagedNotificationChildEvent":{ + "type":"structure", + "required":[ + "schemaVersion", + "id", + "messageComponents", + "notificationType", + "aggregateManagedNotificationEventArn", + "textParts" + ], + "members":{ + "schemaVersion":{ + "shape":"SchemaVersion", + "documentation":"The schema version of the Managed Notification Child Event.
" + }, + "id":{ + "shape":"NotificationEventId", + "documentation":"The unique identifier for a Managed Notification Child Event.
" + }, + "messageComponents":{"shape":"MessageComponents"}, + "sourceEventDetailUrl":{ + "shape":"Url", + "documentation":"The source event URL.
" + }, + "sourceEventDetailUrlDisplayText":{ + "shape":"String", + "documentation":"The detailed URL for the source event.
" + }, + "notificationType":{ + "shape":"NotificationType", + "documentation":"The type of event causing the notification.
Values:
ALERT
A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
WARNING
A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
ANNOUNCEMENT
A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
INFORMATIONAL
A notification about informational messages. For example, recommendations, service announcements, or reminders.
The assessed nature of the event.
Values:
HEALTHY
All EventRules
are ACTIVE
.
UNHEALTHY
Some EventRules
are ACTIVE
and some are INACTIVE
.
The Amazon Resource Name (ARN) of the ManagedNotificationEvent that is associated with this Managed Notification Child Event.
" + }, + "startTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The notification event start time.
" + }, + "endTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The end time of the event.
" + }, + "textParts":{ + "shape":"TextParts", + "documentation":"A list of text values.
" + }, + "organizationalUnitId":{ + "shape":"OrganizationalUnitId", + "documentation":"The Organizational Unit Id that an Amazon Web Services account belongs to.
" + }, + "aggregationDetail":{ + "shape":"AggregationDetail", + "documentation":"Provides detailed information about the dimensions used for event summarization and aggregation.
" + } + }, + "documentation":"A ManagedNotificationChildEvent is a notification-focused representation of an event. They contain semantic information used to create aggregated or non-aggregated end-user notifications.
" + }, + "ManagedNotificationChildEventArn":{ + "type":"string", + "pattern":"arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}/event/[a-z0-9]{27}/child-event/[a-z0-9]{27}" + }, + "ManagedNotificationChildEventOverview":{ + "type":"structure", + "required":[ + "arn", + "managedNotificationConfigurationArn", + "relatedAccount", + "creationTime", + "childEvent", + "aggregateManagedNotificationEventArn" + ], + "members":{ + "arn":{ + "shape":"ManagedNotificationEventArn", + "documentation":"The Amazon Resource Name (ARN) of the ManagedNotificationChildEvent
.
The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration
.
The account related to the ManagedNotificationChildEvent
.
The creation time of the ManagedNotificationChildEvent
.
The content of the ManagedNotificationChildEvent
.
The Amazon Resource Name (ARN) of the ManagedNotificationEvent that is associated with this ManagedNotificationChildEvent
.
The Organizational Unit Id that an AWS account belongs to.
" + } + }, + "documentation":"Describes an overview and metadata for a ManagedNotificationChildEvent
.
The schema version of the ManagedNotificationChildEvent
.
Contains all event metadata present identically across all NotificationEvents
. All fields are present in Source Events via EventBridge.
Provides detailed information about the dimensions used for event summarization and aggregation.
" + }, + "eventStatus":{ + "shape":"EventStatus", + "documentation":"The perceived nature of the event.
Values:
HEALTHY
All EventRules are ACTIVE
and any call can be run.
UNHEALTHY
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
The Type of the event causing this notification.
Values:
ALERT
A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
WARNING
A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
ANNOUNCEMENT
A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
INFORMATIONAL
A notification about informational messages. For example, recommendations, service announcements, or reminders.
Describes a short summary and metadata for a ManagedNotificationChildEvent
.
A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" - }, - "notificationEvents":{ - "shape":"NotificationEvents", - "documentation":"The list of notification events.
" - } - } + "ManagedNotificationConfigurationDescription":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[^\\u0001-\\u001F\\u007F-\\u009F]*" }, - "ListNotificationHubsRequest":{ + "ManagedNotificationConfigurationName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9\\-]+" + }, + "ManagedNotificationConfigurationOsArn":{ + "type":"string", + "pattern":"arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}" + }, + "ManagedNotificationConfigurationStructure":{ "type":"structure", + "required":[ + "arn", + "name", + "description" + ], "members":{ - "maxResults":{ - "shape":"ListNotificationHubsRequestMaxResultsInteger", - "documentation":"The maximum number of records to list in a single response.
", - "location":"querystring", - "locationName":"maxResults" + "arn":{ + "shape":"ManagedNotificationConfigurationOsArn", + "documentation":"The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration
.
A pagination token. Set to null to start listing notification hubs from the start.
", - "location":"querystring", - "locationName":"nextToken" + "name":{ + "shape":"ManagedNotificationConfigurationName", + "documentation":"The name of the ManagedNotificationConfiguration
.
The description of the ManagedNotificationConfiguration
.
Describes the basic structure and properties of a ManagedNotificationConfiguration
.
The NotificationHubs in the account.
" + "schemaVersion":{ + "shape":"SchemaVersion", + "documentation":"Version of the ManagedNotificationEvent
schema.
A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + "id":{ + "shape":"NotificationEventId", + "documentation":"Unique identifier for a ManagedNotificationEvent
.
URL defined by Source Service to be used by notification consumers to get additional information about the event.
" + }, + "sourceEventDetailUrlDisplayText":{ + "shape":"String", + "documentation":"Text that needs to be hyperlinked with the sourceEventDetailUrl. For example, the description of the sourceEventDetailUrl.
" + }, + "notificationType":{ + "shape":"NotificationType", + "documentation":"The nature of the event causing this notification.
Values:
ALERT
A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
WARNING
A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
ANNOUNCEMENT
A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
INFORMATIONAL
A notification about informational messages. For example, recommendations, service announcements, or reminders.
The status of an event.
Values:
HEALTHY
All EventRules are ACTIVE
and any call can be run.
UNHEALTHY
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
The notifications aggregation type.
" + }, + "aggregationSummary":{"shape":"AggregationSummary"}, + "startTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The earliest time of events to return from this call.
" + }, + "endTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The end time of the notification event.
" + }, + "textParts":{ + "shape":"TextParts", + "documentation":"A list of text values.
" + }, + "organizationalUnitId":{ + "shape":"OrganizationalUnitId", + "documentation":"The Organizational Unit Id that an Amazon Web Services account belongs to.
" } - } + }, + "documentation":"A notification-focused representation of an event. They contain semantic information used by AccountContacts or Additional Channels to create end-user notifications.
" }, - "ListTagsForResourceRequest":{ + "ManagedNotificationEventArn":{ + "type":"string", + "pattern":"arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}/event/[a-z0-9]{27}" + }, + "ManagedNotificationEventOverview":{ "type":"structure", - "required":["arn"], + "required":[ + "arn", + "managedNotificationConfigurationArn", + "relatedAccount", + "creationTime", + "notificationEvent" + ], "members":{ "arn":{ - "shape":"NotificationConfigurationArn", - "documentation":"The Amazon Resource Name (ARN) to use to list tags.
", - "location":"uri", - "locationName":"arn" + "shape":"ManagedNotificationEventArn", + "documentation":"The Amazon Resource Name (ARN) of the ManagedNotificationEvent.
" + }, + "managedNotificationConfigurationArn":{ + "shape":"ManagedNotificationConfigurationOsArn", + "documentation":"The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration
.
The account related to the ManagedNotificationEvent
.
The creation time of the ManagedNotificationEvent
.
The notifications aggregation type.
Values:
AGGREGATE
The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
CHILD
Some EventRules
are ACTIVE
and some are INACTIVE
. Any call can be run.
NONE
The notification isn't aggregated.
The Organizational Unit Id that an Amazon Web Services account belongs to.
" + }, + "aggregationSummary":{"shape":"AggregationSummary"}, + "aggregatedNotificationRegions":{ + "shape":"AggregatedNotificationRegions", + "documentation":"The list of the regions where the aggregated notifications in this NotificationEvent
originated.
Describes an overview and metadata for a ManagedNotificationEvent.
" }, - "ListTagsForResourceResponse":{ + "ManagedNotificationEventSummary":{ "type":"structure", + "required":[ + "schemaVersion", + "sourceEventMetadata", + "messageComponents", + "eventStatus", + "notificationType" + ], "members":{ - "tags":{ - "shape":"TagMap", - "documentation":"A list of tags for the specified ARN.
" + "schemaVersion":{ + "shape":"SchemaVersion", + "documentation":"The schema version of the ManagedNotificationEvent
.
Contains metadata about the event that caused the ManagedNotificationEvent
.
The managed notification event status.
Values:
HEALTHY
All EventRules
are ACTIVE
.
UNHEALTHY
Some EventRules
are ACTIVE
and some are INACTIVE
.
The Type of event causing the notification.
Values:
ALERT
A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
WARNING
A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
ANNOUNCEMENT
A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
INFORMATIONAL
A notification about informational messages. For example, recommendations, service announcements, or reminders.
A short summary of a ManagedNotificationEvent
. This is only used when listing managed notification events.
The Region where the notification originated.
" + }, + "source":{ + "shape":"Source", + "documentation":"The source service of the notification.
Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
The event Type of the notification.
" + } + }, + "documentation":"A short summary and metadata for a managed notification event.
" + }, + "ManagedSourceEventMetadataSummaryEventOriginRegionString":{ + "type":"string", + "max":32, + "min":0, + "pattern":"([a-z]{1,2})-([a-z]{1,15}-)+([0-9])" + }, + "ManagedSourceEventMetadataSummaryEventTypeString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"([a-zA-Z0-9 \\-\\(\\)])+" + }, "Media":{ "type":"list", "member":{"shape":"MediaElement"} @@ -1327,7 +2589,7 @@ }, "url":{ "shape":"Url", - "documentation":"The url of the media.
" + "documentation":"The URL of the media.
" }, "caption":{ "shape":"TextPartReference", @@ -1354,7 +2616,7 @@ }, "paragraphSummary":{ "shape":"TextPartReference", - "documentation":"A paragraph long or multiple sentence summary. For example, AWS Chatbot notifications.
" + "documentation":"A paragraph long or multiple sentence summary. For example, Chatbot notifications.
" }, "completeDescription":{ "shape":"TextPartReference", @@ -1426,27 +2688,27 @@ "members":{ "arn":{ "shape":"NotificationConfigurationArn", - "documentation":"The Amazon Resource Name (ARN) of the resource.
" + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration
resource.
The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters.
" + "documentation":"The name of the NotificationConfiguration
. Supports RFC 3986's unreserved characters.
The description of the NotificationConfiguration.
" + "documentation":"The description of the NotificationConfiguration
.
The status of this NotificationConfiguration.
The status should always be INACTIVE when part of the CreateNotificationConfiguration response.
Values:
ACTIVE
All EventRules are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules are ACTIVE
and some are INACTIVE
.
Any call can be run.
INACTIVE
All EventRules are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration is being deleted. Only GET
and LIST
calls can be run.
Only GET
and LIST
calls can be run.
The current status of the NotificationConfiguration
.
The creation time of the resource.
" + "documentation":"The creation time of the NotificationConfiguration
.
The aggregation preference of the NotificationConfiguration.
Values:
LONG
Aggregate notifications for long periods of time (12 hours).
SHORT
Aggregate notifications for short periods of time (5 minutes).
NONE
Don't aggregate notifications.
No delay in delivery.
The aggregation preference of the NotificationConfiguration
.
Values:
LONG
Aggregate notifications for long periods of time (12 hours).
SHORT
Aggregate notifications for short periods of time (5 minutes).
NONE
Don't aggregate notifications.
Contains the complete list of fields for a NotificationConfiguration.
" @@ -1473,7 +2735,7 @@ }, "id":{ "shape":"NotificationEventId", - "documentation":"The unique identifier for a NotificationEvent.
" + "documentation":"The unique identifier for a NotificationEvent
.
The assesed nature of the event.
Values:
HEALTHY
All EventRules are ACTIVE
and any call can be run.
UNHEALTHY
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
The assessed nature of the event.
Values:
HEALTHY
All EventRules
are ACTIVE
and any call can be run.
UNHEALTHY
Some EventRules
are ACTIVE
and some are INACTIVE
. Any call can be run.
The NotificationConfiguration's aggregation type.
Values:
AGGREGATE
The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
CHILD
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
NONE
The notification isn't aggregated.
The aggregation type of the NotificationConfiguration
.
Values:
AGGREGATE
The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
CHILD
Some EventRules
are ACTIVE
and some are INACTIVE
. Any call can be run.
NONE
The notification isn't aggregated.
If the value of aggregationEventType is not NONE
, this is the Amazon Resource Event (ARN) of the parent aggregate notification.
This is omitted if notification isn't aggregated.
" + "documentation":"If the value of aggregationEventType
is not NONE
, this is the Amazon Resource Name (ARN) of the parent aggregate notification.
This is omitted if notification isn't aggregated.
" + }, + "aggregationSummary":{ + "shape":"AggregationSummary", + "documentation":"Provides additional information about how multiple notifications are grouped.
" }, "startTime":{ "shape":"SyntheticTimestamp_date_time", @@ -1521,7 +2787,7 @@ "documentation":"A list of media elements.
" } }, - "documentation":"A NotificationEvent is a notification-focused representation of an event. They contain semantic information used by Channels to create end-user notifications.
" + "documentation":"A NotificationEvent
is a notification-focused representation of an event. It contains semantic information used by Channels to create end-user notifications.
The ARN of the NotificationConfiguration.
" + "documentation":"The ARN of the NotificationConfiguration
.
The account name containing the NotificationHub.
" + "documentation":"The account name containing the NotificationHub
.
The creation time of the NotificationEvent.
" + "documentation":"The creation time of the NotificationEvent
.
Refers to a NotificationEventSummary object.
Similar in structure to content
in the GetNotificationEvent response.
Refers to a NotificationEventSummary
object.
Similar in structure to content
in the GetNotificationEvent
response.
The NotificationConfiguration's aggregation type.
Values:
AGGREGATE
The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
CHILD
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
NONE
The notification isn't aggregated.
The NotificationConfiguration
's aggregation type.
Values:
AGGREGATE
The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
CHILD
Some EventRules
are ACTIVE
and some are INACTIVE
. Any call can be run.
NONE
The notification isn't aggregated.
The ARN of the aggregatedNotificationEventArn to match.
" + "documentation":"The ARN of the aggregatedNotificationEventArn
to match.
Provides an aggregated summary data for notification events.
" } }, - "documentation":"Describes a short summary of a NotificationEvent. This is only used when listing notification events.
" + "documentation":"Describes a short summary of a NotificationEvent
. This is only used when listing notification events.
The notification event status.
Values:
HEALTHY
All EventRules are ACTIVE
and any call can be run.
UNHEALTHY
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
Provides additional information about the current status of the NotificationEvent
.
Values:
HEALTHY
All EventRules
are ACTIVE
.
UNHEALTHY
Some EventRules
are ACTIVE
and some are INACTIVE
.
The type of event causing the notification.
Values:
ALERT
A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
WARNING
A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
ANNOUNCEMENT
A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
INFORMATIONAL
A notification about informational messages. For example, recommendations, service announcements, or reminders.
Describes a short summary and metadata for a notification event.
" + "documentation":"Describes a short summary and metadata for a NotificationEvent
.
The date and time the resource was created.
" + "documentation":"The date and time the NotificationHubOverview
was created.
The most recent time this NotificationHub had an ACTIVE status.
" + "documentation":"The most recent time this NotificationHub
had an ACTIVE
status.
Describes an overview of a NotificationHub.
A NotificationHub is an account-level setting used to select the Regions where you want to store, process and replicate your notifications.
" + "documentation":"Describes an overview of a NotificationHub
.
A NotificationConfiguration
is an account-level setting used to select the Regions where you want to store, process and replicate your notifications.
Status information about the NotificationHub.
Values:
ACTIVE
Incoming NotificationEvents are replicated to this NotificationHub.
REGISTERING
The NotificationHub is initializing. A NotificationHub with this status can't be deregistered.
DEREGISTERING
The NotificationHub is being deleted. You can't register additional NotificationHubs in the same Region as a NotificationHub with this status.
Status information about the NotificationHub
.
Values:
ACTIVE
Incoming NotificationEvents
are replicated to this NotificationHub
.
REGISTERING
The NotificationConfiguration
is initializing. A NotificationConfiguration
with this status can't be deregistered.
DEREGISTERING
The NotificationConfiguration
is being deleted. You can't register additional NotificationHubs
in the same Region as a NotificationConfiguration
with this status.
An Explanation for the current status.
" + "documentation":"An explanation for the current status.
" } }, - "documentation":"NotificationHub status information.
" + "documentation":"Provides additional information about the current NotificationHub
status.
Access Status for the Orgs Service.
" + } + }, + "documentation":"Orgs Service trust for User Notifications.
" + }, + "OrganizationalUnitId":{ + "type":"string", + "pattern":"Root|ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}" + }, "QuotaCode":{"type":"string"}, "Region":{ "type":"string", @@ -1695,7 +2980,7 @@ "members":{ "notificationHubRegion":{ "shape":"Region", - "documentation":"The Region of the NotificationHub.
" + "documentation":"The Region of the NotificationHub
.
The Region of the NotificationHub.
" + "documentation":"The Region of the NotificationHub
.
NotificationHub status information.
" + "documentation":"Provides additional information about the current NotificationConfiguration
status.
The Primary AWS account of Source Event
" + "documentation":"The primary Amazon Web Services account of SourceEvent
.
The AWS servvice the event originates from. For example aws.cloudwatch
.
The Amazon Web Services service the event originates from. For example aws.cloudwatch
.
The type of event. For example, an AWS CloudWatch state change.
" + "documentation":"The type of event. For example, an Amazon CloudWatch state change.
" }, "relatedResources":{ "shape":"Resources", - "documentation":"A list of resources related to this NotificationEvent.
" + "documentation":"A list of resources related to this NotificationEvent
.
Describes the metadata for a source event.
For more information, see Event structure reference in the Amazon EventBridge User Guide.
" @@ -1897,14 +3193,14 @@ }, "source":{ "shape":"String", - "documentation":"The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The matched event source.
Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
" + "documentation":"The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.
" } }, - "documentation":"Contains metadata about the event that caused the NotificationEvent. For other specific values, see sourceEventMetadata.
" + "documentation":"Contains metadata about the event that caused the NotificationEvent
. For other specific values, see sourceEventMetadata
.
The name of the SummarizationDimensionDetail.
" + }, + "value":{ + "shape":"String", + "documentation":"Value of the property used to summarize aggregated events.
" + } + }, + "documentation":"Provides detailed information about the dimensions used for event summarization and aggregation.
" + }, + "SummarizationDimensionDetails":{ + "type":"list", + "member":{"shape":"SummarizationDimensionDetail"} + }, + "SummarizationDimensionOverview":{ + "type":"structure", + "required":[ + "name", + "count" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"Name of the summarization dimension.
" + }, + "count":{ + "shape":"Integer", + "documentation":"Total number of occurrences for this dimension.
" + }, + "sampleValues":{ + "shape":"SampleAggregationDimensionValues", + "documentation":"Indicates the sample values found within the dimension.
" + } + }, + "documentation":"Provides an overview of how data is summarized across different dimensions.
" + }, + "SummarizationDimensionOverviews":{ + "type":"list", + "member":{"shape":"SummarizationDimensionOverview"} + }, "SyntheticTimestamp_date_time":{ "type":"timestamp", "timestampFormat":"iso8601" @@ -2013,7 +3357,7 @@ }, "displayText":{ "shape":"TextPartValueDisplayTextString", - "documentation":"A short single line description of the link. Must be hyperlinked with the URL itself.
Used for text parts with the type URL
.
A short single line description of the link. Must be hyper-linked with the URL itself.
Used for text parts with the type URL
.
The Amazon Resource Name (ARN) to use to update the EventRule.
", + "documentation":"The Amazon Resource Name (ARN) to use to update the EventRule
.
An additional event pattern used to further filter the events this EventRule receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" + "documentation":"An additional event pattern used to further filter the events this EventRule
receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" }, "regions":{ "shape":"Regions", - "documentation":"A list of AWS Regions that sends events to this EventRule.
" + "documentation":"A list of Amazon Web Services Regions that sends events to this EventRule
.
The Amazon Resource Name (ARN) to use to update the EventRule.
" + "documentation":"The Amazon Resource Name (ARN) to use to update the EventRule
.
The ARN of the NotificationConfiguration.
" + "documentation":"The ARN of the NotificationConfiguration
.
The Amazon Resource Name (ARN) used to update the NotificationConfiguration.
", + "documentation":"The Amazon Resource Name (ARN) used to update the NotificationConfiguration
.
The name of the NotificationConfiguration.
" + "documentation":"The name of the NotificationConfiguration
.
The description of the NotificationConfiguration.
" + "documentation":"The description of the NotificationConfiguration
.
The status of this NotificationConfiguration.
The status should always be INACTIVE
when part of the CreateNotificationConfiguration response.
Values:
ACTIVE
All EventRules are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
Any call can be run.
INACTIVE
All EventRules are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration is being deleted.
Only GET
and LIST
calls can be run.
The aggregation preference of the NotificationConfiguration
.
Values:
LONG
Aggregate notifications for long periods of time (12 hours).
SHORT
Aggregate notifications for short periods of time (5 minutes).
NONE
Don't aggregate notifications.
The ARN used to update the NotificationConfiguration.
" + "documentation":"The ARN used to update the NotificationConfiguration
.
The AWS User Notifications API Reference provides descriptions, API request parameters, and the JSON response for each of the User Notification API actions.
User Notification control APIs are currently available in US East (Virginia) - us-east-1
.
GetNotificationEvent and ListNotificationEvents APIs are currently available in commercial partition Regions and only return notifications stored in the same Region in which they're called.
The User Notifications console can only be used in US East (Virginia). Your data however, is stored in each Region chosen as a notification hub in addition to US East (Virginia).
" + "documentation":"The Amazon Web Services User Notifications API Reference provides descriptions, API request parameters, and the JSON response for each of the User Notification API actions.
User Notification control plane APIs are currently available in US East (Virginia) - us-east-1
.
GetNotificationEvent and ListNotificationEvents APIs are currently available in commercial partition Regions and only return notifications stored in the same Region in which they're called.
The User Notifications console can only be used in US East (Virginia). Your data, however, is stored in each Region chosen as a notification hub in addition to US East (Virginia).
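A small sketch of the Region guidance above, assuming one client pinned to us-east-1 for control-plane calls and a second client for reading events stored in a hub Region; operation names are assumed from this model.

```python
# Sketch only: control-plane calls go to us-east-1, event reads go to a Region
# where the notifications are stored. Not a definitive usage example.
import boto3

control = boto3.client("notifications", region_name="us-east-1")
regional = boto3.client("notifications", region_name="eu-west-1")

configs = control.list_notification_configurations().get("notificationConfigurations", [])
events = regional.list_notification_events().get("notificationEvents", [])
print(f"{len(configs)} configurations, {len(events)} events visible in eu-west-1")
```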
" } diff --git a/botocore/data/partitions.json b/botocore/data/partitions.json index a2f0680888..43f6449be3 100644 --- a/botocore/data/partitions.json +++ b/botocore/data/partitions.json @@ -47,6 +47,9 @@ "ap-southeast-5" : { "description" : "Asia Pacific (Malaysia)" }, + "ap-southeast-7" : { + "description" : "Asia Pacific (Thailand)" + }, "aws-global" : { "description" : "AWS Standard global region" }, @@ -89,6 +92,9 @@ "me-south-1" : { "description" : "Middle East (Bahrain)" }, + "mx-central-1" : { + "description" : "Mexico (Central)" + }, "sa-east-1" : { "description" : "South America (Sao Paulo)" }, diff --git a/botocore/data/partnercentral-selling/2022-07-26/service-2.json b/botocore/data/partnercentral-selling/2022-07-26/service-2.json index 5f6f7dfe23..02f9b793bd 100644 --- a/botocore/data/partnercentral-selling/2022-07-26/service-2.json +++ b/botocore/data/partnercentral-selling/2022-07-26/service-2.json @@ -24,12 +24,13 @@ "input":{"shape":"AcceptEngagementInvitationRequest"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":" Use the AcceptEngagementInvitation
action to accept an engagement invitation shared by AWS. Accepting the invitation indicates your willingness to participate in the engagement, granting you access to all engagement-related data.
Use the AcceptEngagementInvitation
action to accept an engagement invitation shared by AWS. Accepting the invitation indicates your willingness to participate in the engagement, granting you access to all engagement-related data.
The CreateEngagement
action allows you to create an Engagement
, which serves as a collaborative space between different parties such as AWS Partners and AWS Sellers. This action automatically adds the caller's AWS account as an active member of the newly created Engagement
.
The CreateEngagement
action allows you to create an Engagement
, which serves as a collaborative space between different parties such as AWS Partners and AWS Sellers. This action automatically adds the caller's AWS account as an active member of the newly created Engagement
.
Creates an Opportunity
record in Partner Central. Use this operation to create a potential business opportunity for submission to Amazon Web Services. Creating an opportunity sets Lifecycle.ReviewStatus
to Pending Submission
.
To submit an opportunity, follow these steps:
To create the opportunity, use CreateOpportunity
.
To associate a solution with the opportunity, use AssociateOpportunity
.
To submit the opportunity, use StartEngagementFromOpportunityTask
.
After submission, you can't edit the opportunity until the review is complete. But opportunities in the Pending Submission
state must have complete details. You can update the opportunity while it's in the Pending Submission
state.
There's a set of mandatory fields to create opportunities, but consider providing optional fields to enrich the opportunity record.
", + "documentation":"Creates an Opportunity
record in Partner Central. Use this operation to create a potential business opportunity for submission to Amazon Web Services. Creating an opportunity sets Lifecycle.ReviewStatus
to Pending Submission
.
To submit an opportunity, follow these steps:
To create the opportunity, use CreateOpportunity
.
To associate a solution with the opportunity, use AssociateOpportunity
.
To start the engagement with AWS, use StartEngagementFromOpportunity
.
After submission, you can't edit the opportunity until the review is complete. But opportunities in the Pending Submission
state must have complete details. You can update the opportunity while it's in the Pending Submission
state.
There's a set of mandatory fields to create opportunities, but consider providing optional fields to enrich the opportunity record.
", "idempotent":true }, "CreateResourceSnapshot":{ @@ -155,7 +156,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Use this action to create a job to generate a snapshot of the specified resource within an engagement. It initiates an asynchronous process to create a resource snapshot. The job creates a new snapshot only if the resource state has changed, adhering to the same access control and immutability rules as direct snapshot creation.
", + "documentation":"Use this action to create a job to generate a snapshot of the specified resource within an engagement. It initiates an asynchronous process to create a resource snapshot. The job creates a new snapshot only if the resource state has changed, adhering to the same access control and immutability rules as direct snapshot creation.
", "idempotent":true }, "DeleteResourceSnapshotJob":{ @@ -167,6 +168,7 @@ "input":{"shape":"DeleteResourceSnapshotJobRequest"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} @@ -221,7 +223,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":" Use this action to retrieve the engagement record for a given EngagementIdentifier
.
Use this action to retrieve the engagement record for a given EngagementIdentifier
.
Use this action to retrieves information about a specific resource snapshot job.
" + "documentation":"Use this action to retrieves information about a specific resource snapshot job.
" }, "GetSellingSystemSettings":{ "name":"GetSellingSystemSettings", @@ -368,7 +370,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":" Retrieves the details of member partners in an engagement. This operation can only be invoked by members of the engagement. The ListEngagementMembers
operation allows you to fetch information about the members of a specific engagement. This action is restricted to members of the engagement being queried.
Retrieves the details of member partners in an Engagement. This operation can only be invoked by members of the Engagement. The ListEngagementMembers
operation allows you to fetch information about the members of a specific Engagement. This action is restricted to members of the Engagement being queried.
Lists the associations between resources and engagements where the caller is a member and has at least one snapshot in the engagement.
" + "documentation":"Lists the associations between resources and engagements where the caller is a member and has at least one snapshot in the engagement.
" }, "ListEngagements":{ "name":"ListEngagements", @@ -400,7 +402,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"This action allows users to retrieve a list of engagement records from Partner Central. This action can be used to manage and track various engagements across different stages of the partner selling process.
" + "documentation":"This action allows users to retrieve a list of Engagement records from Partner Central. This action can be used to manage and track various engagements across different stages of the partner selling process.
" }, "ListOpportunities":{ "name":"ListOpportunities", @@ -430,7 +432,8 @@ "errors":[ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"Lists resource snapshot jobs owned by the customer. This operation supports various filtering scenarios, including listing all jobs owned by the caller, jobs for a specific engagement, jobs with a specific status, or any combination of these filters.
" }, @@ -448,7 +451,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Retrieves a list of resource view snapshots based on specified criteria.
" + "documentation":"Retrieves a list of resource view snapshots based on specified criteria. This operation supports various use cases, including:
Fetching all snapshots associated with an engagement.
Retrieving snapshots of a specific resource type within an engagement.
Obtaining snapshots for a particular resource using a specified template.
Accessing the latest snapshot of a resource within an engagement.
Filtering snapshots by resource owner.
Retrieves a list of Partner Solutions that the partner registered on Partner Central. This API is used to generate a list of solutions that an end user selects from for association with an opportunity.
" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Returns a list of tags for a resource.
" + }, "PutSellingSystemSettings":{ "name":"PutSellingSystemSettings", "http":{ @@ -492,6 +512,7 @@ "input":{"shape":"RejectEngagementInvitationRequest"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, @@ -550,7 +571,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Starts a resource snapshot job that has been previously created.
", + "documentation":"Starts a resource snapshot job that has been previously created.
", "idempotent":true }, "StopResourceSnapshotJob":{ @@ -566,7 +587,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Stops a resource snapshot job. The job must be started prior to being stopped.
", + "documentation":"Stops a resource snapshot job. The job must be started prior to being stopped.
", "idempotent":true }, "SubmitOpportunity":{ @@ -583,7 +604,45 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":" Use this action to submit an opportunity that was previously created by partner for AWS review. After you perform this action, the opportunity becomes non-editable until it is reviewed by AWS and has LifeCycle.ReviewStatus
as either Approved
or Action Required
.
Use this action to submit an Opportunity that was previously created by partner for AWS review. After you perform this action, the Opportunity becomes non-editable until it is reviewed by AWS and has LifeCycle.ReviewStatus
as either Approved
or Action Required
.
Assigns one or more tags (key-value pairs) to the specified resource.
", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Removes a tag or tags from a resource.
", + "idempotent":true }, "UpdateOpportunity":{ "name":"UpdateOpportunity", @@ -614,7 +673,7 @@ "members":{ "Catalog":{ "shape":"CatalogIdentifier", - "documentation":" The CatalogType
parameter specifies the catalog associated with the engagement invitation. Accepted values are AWS
and Sandbox
, which determine the environment in which the engagement invitation is managed.
The CatalogType
parameter specifies the catalog associated with the engagement invitation. Accepted values are AWS
and Sandbox
, which determine the environment in which the engagement invitation is managed.
The CreateEngagementRequest$Catalog
parameter specifies the catalog related to the engagement. Accepted values are AWS
and Sandbox
, which determine the environment in which the engagement is managed.
The CreateEngagementRequest$Catalog
parameter specifies the catalog related to the engagement. Accepted values are AWS
and Sandbox
, which determine the environment in which the engagement is managed.
The CreateEngagementRequest$ClientToken
parameter specifies a unique, case-sensitive identifier to ensure that the request is handled exactly once. The value must not exceed sixty-four alphanumeric characters.
The CreateEngagementRequest$ClientToken
parameter specifies a unique, case-sensitive identifier to ensure that the request is handled exactly once. The value must not exceed sixty-four alphanumeric characters.
The Contexts
field is a required array of objects, with a maximum of 5 contexts allowed, specifying detailed information about customer projects associated with the Engagement. Each context object contains a Type
field indicating the context type, which must be CustomerProject
in this version, and a Payload
field containing the CustomerProject
details. The CustomerProject
object is composed of two main components: Customer
and Project
. The Customer
object includes information such as CompanyName
, WebsiteUrl
, Industry
, and CountryCode
, providing essential details about the customer. The Project
object contains Title
, BusinessProblem
, and TargetCompletionDate
, offering insights into the specific project associated with the customer. This structure allows comprehensive context to be included within the Engagement, facilitating effective collaboration between parties by providing relevant customer and project information.
The Contexts
field is a required array of objects, with a maximum of 5 contexts allowed, specifying detailed information about customer projects associated with the Engagement. Each context object contains a Type
field indicating the context type, which must be CustomerProject
in this version, and a Payload
field containing the CustomerProject
details. The CustomerProject
object is composed of two main components: Customer
and Project
. The Customer
object includes information such as CompanyName
, WebsiteUrl
, Industry
, and CountryCode
, providing essential details about the customer. The Project
object contains Title
, BusinessProblem
, and TargetCompletionDate
, offering insights into the specific project associated with the customer. This structure allows comprehensive context to be included within the Engagement, facilitating effective collaboration between parties by providing relevant customer and project information.
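The nested Contexts structure described above is easier to see in a request sketch. The field names mirror this model; the company, project, and enum values are illustrative only and must satisfy the service's own validation.

```python
# Hedged sketch of CreateEngagement with a CustomerProject context
# (values are placeholders, not real customer data).
import uuid
import boto3

client = boto3.client("partnercentral-selling", region_name="us-east-1")

response = client.create_engagement(
    Catalog="Sandbox",
    ClientToken=str(uuid.uuid4()),   # unique, case-sensitive idempotency token
    Title="Data platform modernization",
    Description="Joint engagement for a hypothetical customer project.",
    Contexts=[
        {
            "Type": "CustomerProject",
            "Payload": {
                "CustomerProject": {
                    "Customer": {
                        "CompanyName": "Example Corp",
                        "WebsiteUrl": "https://example.com",
                        "Industry": "Manufacturing",   # must be a valid Industry enum value
                        "CountryCode": "US",
                    },
                    "Project": {
                        "Title": "Lakehouse migration",
                        "BusinessProblem": "Legacy warehouse cannot scale with reporting demand.",
                        "TargetCompletionDate": "2025-12-31",
                    },
                }
            },
        }
    ],
)
print(response["Id"], response["Arn"])
```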
Provides a description of the Engagement
.
Provides a description of the Engagement
.
Specifies the title of the Engagement
.
Specifies the title of the Engagement
.
The Amazon Resource Name (ARN) that identifies the engagement.
" + "documentation":"The Amazon Resource Name (ARN) that identifies the engagement.
" }, "Id":{ "shape":"EngagementIdentifier", - "documentation":"Unique identifier assigned to the newly created engagement.
" + "documentation":"Unique identifier assigned to the newly created engagement.
" } } }, @@ -1712,7 +1771,7 @@ }, "PrimaryNeedsFromAws":{ "shape":"PrimaryNeedsFromAws", - "documentation":"Identifies the type of support the partner needs from Amazon Web Services.
Valid values:
Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks.
Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation.
Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution.
Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals).
Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution.
Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment.
Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning).
Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support.
Do Not Need Support from AWS Sales Rep: Indicates that a partner doesn't need support from an Amazon Web Services sales representative, and the partner solely manages the opportunity. It's possible to request coselling support on these opportunities at any stage during their lifecycles. This is also known as a for-visibility-only (FVO) opportunity.
Identifies the type of support the partner needs from Amazon Web Services.
Valid values:
Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks.
Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation.
Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution.
Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals).
Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution.
Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment.
Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning).
Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support.
Specifies the catalog in which to create the snapshot job. Valid values are AWS
and Sandbox
.
Specifies the catalog in which to create the snapshot job. Valid values are AWS
and Sandbox
.
Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate snapshot job creations.
", + "documentation":"A client-generated UUID used for idempotency check. The token helps prevent duplicate job creations.
", "idempotencyToken":true }, "EngagementIdentifier":{ "shape":"EngagementIdentifier", - "documentation":"Specifies the identifier of the engagement associated with the resource to be snapshotted.
" + "documentation":"Specifies the identifier of the engagement associated with the resource to be snapshotted.
" }, "ResourceIdentifier":{ "shape":"ResourceIdentifier", - "documentation":" Specifies the identifier of the specific resource to be snapshotted. The format depends on the ResourceType
.
Specifies the identifier of the specific resource to be snapshotted. The format depends on the ResourceType
.
Specifies the name of the template that defines the schema for the snapshot.
" + "documentation":"Specifies the name of the template that defines the schema for the snapshot.
" }, "ResourceType":{ "shape":"ResourceType", - "documentation":" The type of resource for which the snapshot job is being created. Must be one of the supported resource types Opportunity
.
The type of resource for which the snapshot job is being created. Must be one of the supported resource types, i.e., Opportunity
A list of objects specifying each tag name and value.
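Putting the members documented above together, a hedged sketch of creating a snapshot job follows. The template member name and the Key/Value tag shape are assumptions; all identifiers are placeholders.

```python
# Sketch of CreateResourceSnapshotJob (identifiers hypothetical; the template
# parameter name and tag structure are assumed, not confirmed by this model).
import uuid
import boto3

client = boto3.client("partnercentral-selling", region_name="us-east-1")

response = client.create_resource_snapshot_job(
    Catalog="AWS",
    ClientToken=str(uuid.uuid4()),                  # client-generated UUID for idempotency
    EngagementIdentifier="eng-example-identifier",  # placeholder engagement id
    ResourceType="Opportunity",                     # the supported resource type noted above
    ResourceIdentifier="O1234567",                  # placeholder opportunity id
    ResourceSnapshotTemplateIdentifier="Standard",  # assumed parameter name and value
    Tags=[{"Key": "team", "Value": "alliances"}],   # assumed Key/Value tag shape
)
print(response["Id"], response["Arn"])
```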
" } } }, @@ -1798,11 +1861,11 @@ "members":{ "Arn":{ "shape":"ResourceSnapshotJobArn", - "documentation":"The Amazon Resource Name (ARN) of the created snapshot job.
" + "documentation":"The Amazon Resource Name (ARN) of the created snapshot job.
" }, "Id":{ "shape":"ResourceSnapshotJobIdentifier", - "documentation":"The unique identifier for the created snapshot job.
" + "documentation":"The unique identifier for the created snapshot job.
" } } }, @@ -2059,10 +2122,10 @@ "Customer":{"shape":"EngagementCustomer"}, "Project":{ "shape":"EngagementCustomerProjectDetails", - "documentation":"Information about the customer project associated with the Engagement.
" + "documentation":"Information about the customer project associated with the Engagement.
" } }, - "documentation":"The CustomerProjects structure in Engagements offers a flexible framework for managing customer-project relationships. It supports multiple customers per Engagement and multiple projects per customer, while also allowing for customers without projects and projects without specific customers.
All Engagement members have full visibility of customers and their associated projects, enabling the capture of relevant context even when project details are not fully defined. This structure also facilitates targeted invitations, allowing partners to focus on specific customers and their business problems when sending Engagement invitations.
" + "documentation":"The CustomerProjects structure in Engagements offers a flexible framework for managing customer-project relationships. It supports multiple customers per Engagement and multiple projects per customer, while also allowing for customers without projects and projects without specific customers.
All Engagement members have full visibility of customers and their associated projects, enabling the capture of relevant context even when project details are not fully defined. This structure also facilitates targeted invitations, allowing partners to focus on specific customers and their business problems when sending Engagement invitations.
" }, "CustomerSummary":{ "type":"structure", @@ -2172,24 +2235,24 @@ "members":{ "Payload":{ "shape":"EngagementContextPayload", - "documentation":"Contains the specific details of the Engagement context. The structure of this payload varies depending on the Type field.
" + "documentation":"Contains the specific details of the Engagement context. The structure of this payload varies depending on the Type field.
" }, "Type":{ "shape":"EngagementContextType", - "documentation":"Specifies the type of Engagement context. Valid values are \"CustomerProject\" or \"Document\", indicating whether the context relates to a customer project or a document respectively.
" + "documentation":"Specifies the type of Engagement context. Valid values are \"CustomerProject\" or \"Document\", indicating whether the context relates to a customer project or a document respectively.
" } }, - "documentation":"Provides detailed context information for an Engagement. This structure allows for specifying the type of context and its associated payload.
" + "documentation":"Provides detailed context information for an Engagement. This structure allows for specifying the type of context and its associated payload.
" }, "EngagementContextPayload":{ "type":"structure", "members":{ "CustomerProject":{ "shape":"CustomerProjectsContext", - "documentation":"Contains detailed information about a customer project when the context type is \"CustomerProject\". This field is present only when the Type in EngagementContextDetails is set to \"CustomerProject\".
" + "documentation":"Contains detailed information about a customer project when the context type is \"CustomerProject\". This field is present only when the Type in EngagementContextDetails is set to \"CustomerProject\".
" } }, - "documentation":"Represents the payload of an Engagement context. The structure of this payload varies based on the context type specified in the EngagementContextDetails.
", + "documentation":"Represents the payload of an Engagement context. The structure of this payload varies based on the context type specified in the EngagementContextDetails.
", "union":true }, "EngagementContextType":{ @@ -2246,18 +2309,18 @@ "members":{ "BusinessProblem":{ "shape":"EngagementCustomerBusinessProblem", - "documentation":"A description of the business problem the project aims to solve.
" + "documentation":"A description of the business problem the project aims to solve.
" }, "TargetCompletionDate":{ "shape":"EngagementCustomerProjectDetailsTargetCompletionDateString", - "documentation":"The target completion date for the customer's project.
" + "documentation":"The target completion date for the customer's project.
" }, "Title":{ "shape":"EngagementCustomerProjectTitle", - "documentation":"The title of the project.
" + "documentation":"The title of the project.
" } }, - "documentation":"Provides comprehensive details about a customer project associated with an Engagement. This may include information such as project goals, timelines, and specific customer requirements.
" + "documentation":"Provides comprehensive details about a customer project associated with an Engagement. This may include information such as project goals, timelines, and specific customer requirements.
" }, "EngagementCustomerProjectDetailsTargetCompletionDateString":{ "type":"string", @@ -2387,15 +2450,15 @@ "members":{ "AccountId":{ "shape":"AwsAccount", - "documentation":"This is the unique identifier for the AWS account associated with the member organization. It's used for AWS-related operations and identity verification.
" + "documentation":"This is the unique identifier for the AWS account associated with the member organization. It's used for AWS-related operations and identity verification.
" }, "CompanyName":{ "shape":"MemberCompanyName", - "documentation":"The official name of the member's company or organization.
" + "documentation":"The official name of the member's company or organization.
" }, "WebsiteUrl":{ "shape":"String", - "documentation":"The URL of the member company's website. This offers a way to find more information about the member organization and serves as an additional identifier.
" + "documentation":"The URL of the member company's website. This offers a way to find more information about the member organization and serves as an additional identifier.
" } }, "documentation":"Engagement members are the participants in an Engagement, which is likely a collaborative project or business opportunity within the AWS partner network. Members can be different partner organizations or AWS accounts that are working together on a specific engagement.
Each member is represented by their AWS Account ID, Company Name, and associated details. Members have a status within the Engagement (PENDING, ACCEPTED, REJECTED, or WITHDRAWN), indicating their current state of participation. Only existing members of an Engagement can view the list of other members. This implies a level of privacy and access control within the Engagement structure.
" @@ -2409,14 +2472,14 @@ "members":{ "CompanyName":{ "shape":"MemberCompanyName", - "documentation":"The official name of the member's company or organization.
" + "documentation":"The official name of the member's company or organization.
" }, "WebsiteUrl":{ "shape":"String", - "documentation":"The URL of the member company's website. This offers a way to find more information about the member organization and serves as an additional identifier.
" + "documentation":"The URL of the member company's website. This offers a way to find more information about the member organization and serves as an additional identifier.
" } }, - "documentation":"The EngagementMemberSummary provides a snapshot of essential information about participants in an AWS Partner Central Engagement. This compact data structure encapsulates key details of each member, facilitating efficient collaboration and management within the Engagement.
" + "documentation":"The EngagementMemberSummary provides a snapshot of essential information about participants in an AWS Partner Central Engagement. This compact data structure encapsulates key details of each member, facilitating efficient collaboration and management within the Engagement.
" }, "EngagementMembers":{ "type":"list", @@ -2440,7 +2503,7 @@ }, "CreatedBy":{ "shape":"AwsAccount", - "documentation":"The AWS account ID of the entity that created the association.
" + "documentation":"The AWS account ID of the entity that owns the resource. Identifies the account responsible for or having primary control over the resource.
" }, "EngagementId":{ "shape":"EngagementIdentifier", @@ -2478,14 +2541,14 @@ "members":{ "SortBy":{ "shape":"EngagementSortName", - "documentation":"The field by which to sort the results.
" + "documentation":"The field by which to sort the results.
" }, "SortOrder":{ "shape":"SortOrder", - "documentation":"The order in which to sort the results.
" + "documentation":"The order in which to sort the results.
" } }, - "documentation":"Specifies the sorting parameters for listing Engagements.
" + "documentation":"Specifies the sorting parameters for listing Engagements.
" }, "EngagementSortName":{ "type":"string", @@ -2496,30 +2559,30 @@ "members":{ "Arn":{ "shape":"EngagementArn", - "documentation":"The Amazon Resource Name (ARN) of the created engagement.
" + "documentation":"The Amazon Resource Name (ARN) of the created Engagement.
" }, "CreatedAt":{ "shape":"DateTime", - "documentation":"The date and time when the engagement was created.
" + "documentation":"The date and time when the Engagement was created.
" }, "CreatedBy":{ "shape":"AwsAccount", - "documentation":"The AWS account ID of the engagement creator.
" + "documentation":"The AWS Account ID of the Engagement creator.
" }, "Id":{ "shape":"EngagementIdentifier", - "documentation":"The unique identifier for the engagement.
" + "documentation":"The unique identifier for the Engagement.
" }, "MemberCount":{ "shape":"Integer", - "documentation":"The number of members in the engagement.
" + "documentation":"The number of members in the Engagement.
" }, "Title":{ "shape":"EngagementTitle", - "documentation":"The title of the engagement.
" + "documentation":"The title of the Engagement.
" } }, - "documentation":" An object that contains an Engagement
's subset of fields.
An object that contains an Engagement
's subset of fields.
A URL providing additional information or context about the spend estimation.
" + "documentation":"A URL providing additional information or context about the spend estimation.
" }, "Frequency":{ "shape":"PaymentFrequency", @@ -2852,11 +2915,11 @@ }, "EngagementDescription":{ "shape":"EngagementDescription", - "documentation":"The description of the engagement associated with this invitation.
" + "documentation":"The description of the engagement associated with this invitation.
" }, "EngagementId":{ "shape":"EngagementIdentifier", - "documentation":"The identifier of the engagement associated with this invitation.This ID links the invitation to its corresponding engagement.
" + "documentation":"The identifier of the engagement associated with this invitation.This ID links the invitation to its corresponding engagement.
" }, "EngagementTitle":{ "shape":"EngagementTitle", @@ -2864,7 +2927,7 @@ }, "ExistingMembers":{ "shape":"EngagementMemberSummaries", - "documentation":"A list of active members currently part of the Engagement. This array contains a maximum of 10 members, each represented by an object with the following properties.
CompanyName: The name of the member's company.
WebsiteUrl: The website URL of the member's company.
A list of active members currently part of the Engagement. This array contains a maximum of 10 members, each represented by an object with the following properties.
CompanyName: The name of the member's company.
WebsiteUrl: The website URL of the member's company.
The message sent to the invited partner when the invitation was created.
" + "documentation":"The message sent to the invited partner when the invitation was created.
" }, "Payload":{ "shape":"Payload", @@ -2926,11 +2989,11 @@ "members":{ "Catalog":{ "shape":"CatalogIdentifier", - "documentation":" Specifies the catalog related to the engagement request. Valid values are AWS
and Sandbox.
Specifies the catalog related to the engagement request. Valid values are AWS and Sandbox.
Specifies the identifier of the Engagement record to retrieve.
" + "documentation":"Specifies the identifier of the Engagement record to retrieve.
" } } }, @@ -2939,35 +3002,35 @@ "members":{ "Arn":{ "shape":"EngagementArn", - "documentation":"The Amazon Resource Name (ARN) of the engagement retrieved.
" + "documentation":"The Amazon Resource Name (ARN) of the engagement retrieved.
" }, "Contexts":{ "shape":"EngagementContexts", - "documentation":"A list of context objects associated with the engagement. Each context provides additional information related to the Engagement, such as customer projects or documents.
" + "documentation":"A list of context objects associated with the engagement. Each context provides additional information related to the Engagement, such as customer projects or documents.
" }, "CreatedAt":{ "shape":"DateTime", - "documentation":"The date and time when the Engagement was created, presented in ISO 8601 format (UTC). For example: \"2023-05-01T20:37:46Z\". This timestamp helps track the lifecycle of the Engagement.
" + "documentation":"The date and time when the Engagement was created, presented in ISO 8601 format (UTC). For example: \"2023-05-01T20:37:46Z\". This timestamp helps track the lifecycle of the Engagement.
" }, "CreatedBy":{ "shape":"AwsAccount", - "documentation":"The AWS account ID of the user who originally created the engagement. This field helps in tracking the origin of the engagement.
" + "documentation":"The AWS account ID of the user who originally created the engagement. This field helps in tracking the origin of the engagement.
" }, "Description":{ "shape":"EngagementDescription", - "documentation":"A more detailed description of the engagement. This provides additional context or information about the engagement's purpose or scope.
" + "documentation":"A more detailed description of the engagement. This provides additional context or information about the engagement's purpose or scope.
" }, "Id":{ "shape":"EngagementIdentifier", - "documentation":"The unique resource identifier of the engagement retrieved.
" + "documentation":"The unique resource identifier of the engagement retrieved.
" }, "MemberCount":{ "shape":"Integer", - "documentation":"Specifies the current count of members participating in the Engagement. This count includes all active members regardless of their roles or permissions within the Engagement.
" + "documentation":"Specifies the current count of members participating in the Engagement. This count includes all active members regardless of their roles or permissions within the Engagement.
" }, "Title":{ "shape":"EngagementTitle", - "documentation":"The title of the engagement. It provides a brief, descriptive name for the engagement that is meaningful and easily recognizable.
" + "documentation":"The title of the engagement. It provides a brief, descriptive name for the engagement that is meaningful and easily recognizable.
" } } }, @@ -3048,7 +3111,7 @@ }, "PrimaryNeedsFromAws":{ "shape":"PrimaryNeedsFromAws", - "documentation":"Identifies the type of support the partner needs from Amazon Web Services.
Valid values:
Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks.
Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation.
Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution.
Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals).
Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution.
Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment.
Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning).
Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support.
Do Not Need Support from Amazon Web Services Sales Rep: Indicates that a partner doesn't need support from an Amazon Web Services sales representative, and the partner solely manages the opportunity. It's possible to request coselling support on these opportunities at any stage during their lifecycle. Also known as, for-visibility-only (FVO) opportunity.
Identifies the type of support the partner needs from Amazon Web Services.
Valid values:
Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks.
Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation.
Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution.
Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals).
Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution.
Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment.
Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning).
Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support.
Specifies the catalog related to the request. Valid values are:
AWS: Retrieves the snapshot job from the production AWS environment.
Sandbox: Retrieves the snapshot job from a sandbox environment used for testing or development purposes.
Specifies the catalog related to the request. Valid values are:
AWS: Retrieves the snapshot job from the production AWS environment.
Sandbox: Retrieves the snapshot job from a sandbox environment used for testing or development purposes.
The unique identifier of the resource snapshot job to be retrieved. This identifier is crucial for pinpointing the specific job you want to query.
" + "documentation":"The unique identifier of the resource snapshot job to be retrieved. This identifier is crucial for pinpointing the specific job you want to query.
" } } }, @@ -3092,51 +3155,51 @@ "members":{ "Arn":{ "shape":"ResourceSnapshotJobArn", - "documentation":"he Amazon Resource Name (ARN) of the snapshot job. This globally unique identifier can be used for resource-specific operations across AWS services.
" + "documentation":"The Amazon Resource Name (ARN) of the snapshot job. This globally unique identifier can be used for resource-specific operations across AWS services.
" }, "Catalog":{ "shape":"CatalogIdentifier", - "documentation":"The catalog in which the snapshot job was created. This will match the catalog specified in the request.
" + "documentation":"The catalog in which the snapshot job was created. This will match the Catalog specified in the request.
" }, "CreatedAt":{ "shape":"DateTime", - "documentation":"The date and time when the snapshot job was created, in ISO 8601 format (UTC). Example: \"2023-05-01T20:37:46Z\"
" + "documentation":"The date and time when the snapshot job was created in ISO 8601 format (UTC). Example: \"2023-05-01T20:37:46Z\"
" }, "EngagementId":{ "shape":"EngagementIdentifier", - "documentation":"The identifier of the engagement associated with this snapshot job. This links the job to a specific engagement context.
" + "documentation":"The identifier of the engagement associated with this snapshot job. This links the job to a specific engagement context.
" }, "Id":{ "shape":"ResourceSnapshotJobIdentifier", - "documentation":" The unique identifier of the snapshot job. This matches the ResourceSnapshotJobIdentifier
provided in the request.
The unique identifier of the snapshot job. This matches the ResourceSnapshotJobIdentifier provided in the request.
" }, "LastFailure":{ "shape":"String", - "documentation":"If the job has encountered any failures, this field contains the error message from the most recent failure. This can be useful for troubleshooting issues with the job.
" + "documentation":"If the job has encountered any failures, this field contains the error message from the most recent failure. This can be useful for troubleshooting issues with the job.
" }, "LastSuccessfulExecutionDate":{ "shape":"DateTime", - "documentation":"The date and time of the last successful execution of the job, in ISO 8601 format (UTC). Example: \"2023-05-01T20:37:46Z\"
" + "documentation":"The date and time of the last successful execution of the job, in ISO 8601 format (UTC). Example: \"2023-05-01T20:37:46Z\"
" }, "ResourceArn":{ "shape":"ResourceArn", - "documentation":"The Amazon Resource Name (ARN) of the resource being snapshotted. This provides a globally unique identifier for the resource across AWS.
" + "documentation":"The Amazon Resource Name (ARN) of the resource being snapshotted. This provides a globally unique identifier for the resource across AWS.
" }, "ResourceId":{ "shape":"ResourceIdentifier", - "documentation":" The identifier of the specific resource being snapshotted. The format may vary depending on the ResourceType
.
The identifier of the specific resource being snapshotted. The format might vary depending on the ResourceType.
" }, "ResourceSnapshotTemplateName":{ "shape":"ResourceTemplateName", - "documentation":"The name of the template used for creating the snapshot. This is the same as the template name. It defines the structure and content of the snapshot.
" + "documentation":"The name of the template used for creating the snapshot. This is the same as the template name. It defines the structure and content of the snapshot.
" }, "ResourceType":{ "shape":"ResourceType", - "documentation":" The type of resource being snapshotted. This would have Opportunity
as a value as it is dependent on the supported resource type.
The type of resource being snapshotted. This would have \"Opportunity\" as a value as it is dependent on the supported resource type.
" }, "Status":{ "shape":"ResourceSnapshotJobStatus", - "documentation":"The current status of the snapshot job. Valid values:
STOPPED: The job is not currently running.
RUNNING: The job is actively executing.
The current status of the snapshot job. Valid values:
STOPPED: The job is not currently running.
RUNNING: The job is actively executing.
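As an illustration of the STOPPED/RUNNING states above, a minimal boto3 sketch of polling a snapshot job follows; the "partnercentral-selling" client name, the snake_case method name, and the ResourceSnapshotJobIdentifier parameter are assumptions derived from this model, and the job identifier is hypothetical.

    import boto3

    client = boto3.client("partnercentral-selling")  # assumed boto3 service name

    # Fetch the job and branch on the Status values documented above (STOPPED / RUNNING).
    job = client.get_resource_snapshot_job(
        Catalog="AWS",                             # or "Sandbox" for test environments
        ResourceSnapshotJobIdentifier="job-1234",  # hypothetical identifier
    )
    if job["Status"] == "RUNNING":
        print("Snapshot job is actively executing")
    else:
        print("Snapshot job is stopped; last failure:", job.get("LastFailure"))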
The Amazon Resource Name (ARN) of the snapshot. This globally unique identifier can be used for resource-specific operations across AWS services.
" + "documentation":"The Amazon Resource Name (ARN) that uniquely identifies the resource snapshot.
" }, "Catalog":{ "shape":"CatalogIdentifier", @@ -3203,19 +3266,19 @@ "Payload":{"shape":"ResourceSnapshotPayload"}, "ResourceId":{ "shape":"ResourceIdentifier", - "documentation":" The identifier of the specific resource that was snapshotted. Matches the ResourceIdentifier
specified in the request.
The identifier of the specific resource that was snapshotted. Matches the ResourceIdentifier specified in the request.
" }, "ResourceSnapshotTemplateName":{ "shape":"ResourceTemplateName", - "documentation":"The name of the view used for this snapshot. This is the same as the template name.
" + "documentation":"The name of the view used for this snapshot. This is the same as the template name.
" }, "ResourceType":{ "shape":"ResourceType", - "documentation":" The type of the resource that was snapshotted. Matches the ResourceType
specified in the request.
The type of the resource that was snapshotted. Matches the ResourceType specified in the request.
" }, "Revision":{ "shape":"ResourceSnapshotRevision", - "documentation":"The revision number of this snapshot. This is a positive integer that is sequential and unique within the context of a resource view.
" + "documentation":"The revision number of this snapshot. This is a positive integer that is sequential and unique within the context of a resource view.
" } } }, @@ -3595,7 +3658,7 @@ }, "EngagementInvitationId":{ "shape":"EngagementInvitationIdentifier", - "documentation":"The unique identifier of the engagement identifier created as a result of the task. This field is populated when the task is completed successfully.
" + "documentation":"The unique identifier of the Engagement Invitation.
" }, "Message":{ "shape":"String", @@ -3761,19 +3824,19 @@ "members":{ "Catalog":{ "shape":"CatalogIdentifier", - "documentation":"The catalog related to the request.
" + "documentation":"The catalog related to the request.
" }, "Identifier":{ "shape":"EngagementArnOrIdentifier", - "documentation":"Identifier of the engagement record to retrieve members from.
" + "documentation":"Identifier of the Engagement record to retrieve members from.
" }, "MaxResults":{ "shape":"MemberPageSize", - "documentation":"The maximum number of results to return in a single call.
" + "documentation":"The maximum number of results to return in a single call.
" }, "NextToken":{ "shape":"String", - "documentation":"The token for the next set of results.
" + "documentation":"The token for the next set of results.
" } } }, @@ -3787,7 +3850,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"A pagination token used to retrieve the next set of results. If there are more results available than can be returned in a single response, this token will be present. Use this token in a subsequent request to retrieve the next page of results. If there are no more results, this value will be null.
" + "documentation":"A pagination token used to retrieve the next set of results. If there are more results available than can be returned in a single response, this token will be present. Use this token in a subsequent request to retrieve the next page of results. If there are no more results, this value will be null.
" } } }, @@ -3797,27 +3860,27 @@ "members":{ "Catalog":{ "shape":"CatalogIdentifier", - "documentation":"Specifies the catalog in which to search for engagement-resource associations.
" + "documentation":"Specifies the catalog in which to search for engagement-resource associations. Valid Values: \"AWS\" or \"Sandbox\"
AWS for production environments.
Sandbox for testing and development purposes.
Filters the results to include only associations with resources owned by the specified AWS account. Use this when you want to find associations related to resources owned by a particular account.
" + "documentation":"Filters the response to include only snapshots of resources owned by the specified AWS account ID. Use this when you want to find associations related to resources owned by a particular account.
" }, "EngagementIdentifier":{ "shape":"EngagementIdentifier", - "documentation":"Filters the results to include only associations related to the specified engagement. Use this when you want to find all resources associated with a specific engagement.
" + "documentation":"Filters the results to include only associations related to the specified engagement. Use this when you want to find all resources associated with a specific engagement.
" }, "MaxResults":{ "shape":"ListEngagementResourceAssociationsRequestMaxResultsInteger", - "documentation":"Limits the number of results returned in a single call. Use this to control the number of results returned, especially useful for pagination.
" + "documentation":"Limits the number of results returned in a single call. Use this to control the number of results returned, especially useful for pagination.
" }, "NextToken":{ "shape":"String", - "documentation":"A token used for pagination of results. Include this token in subsequent requests to retrieve the next set of results.
" + "documentation":"A token used for pagination of results. Include this token in subsequent requests to retrieve the next set of results.
" }, "ResourceIdentifier":{ "shape":"ResourceIdentifier", - "documentation":"Filters the results to include only associations with the specified resource. Varies depending on the resource type. Use this when you want to find all engagements associated with a specific resource.
" + "documentation":"Filters the results to include only associations with the specified resource. Varies depending on the resource type. Use this when you want to find all engagements associated with a specific resource.
" }, "ResourceType":{ "shape":"ResourceType", @@ -3859,19 +3922,19 @@ }, "EngagementIdentifier":{ "shape":"EngagementIdentifiers", - "documentation":"An array of strings representing engagement identifiers to retrieve.
" + "documentation":"An array of strings representing engagement identifiers to retrieve.
" }, "ExcludeCreatedBy":{ "shape":"AwsAccountList", - "documentation":"An array of strings representing AWS Account IDs. Use this to exclude engagements created by specific users.
" + "documentation":"An array of strings representing AWS Account IDs. Use this to exclude engagements created by specific users.
" }, "MaxResults":{ "shape":"EngagementPageSize", - "documentation":"The maximum number of results to return in a single call.
" + "documentation":"The maximum number of results to return in a single call.
" }, "NextToken":{ "shape":"String", - "documentation":"The token for the next set of results. This value is returned from a previous call.
" + "documentation":"The token for the next set of results. This value is returned from a previous call.
" }, "Sort":{ "shape":"EngagementSort", @@ -3885,11 +3948,11 @@ "members":{ "EngagementSummaryList":{ "shape":"EngagementSummaryList", - "documentation":"An array of engagement summary objects.
" + "documentation":"An array of engagement summary objects.
" }, "NextToken":{ "shape":"String", - "documentation":"The token to retrieve the next set of results. This field will be null if there are no more results.
" + "documentation":"The token to retrieve the next set of results. This field will be null if there are no more results.
" } } }, @@ -4036,7 +4099,7 @@ }, "CreatedBy":{ "shape":"AwsAccount", - "documentation":"Filters the response to include only snapshots of resources created by the specified AWS account.
" + "documentation":"Filters the response to include only snapshots of resources owned by the specified AWS account.
" }, "EngagementIdentifier":{ "shape":"EngagementIdentifier", @@ -4056,7 +4119,7 @@ }, "ResourceSnapshotTemplateIdentifier":{ "shape":"ResourceTemplateName", - "documentation":"Filters the response to include only snapshots created using the specified template.
" + "documentation":"Filters the response to include only snapshots created using the specified template.
" }, "ResourceType":{ "shape":"ResourceType", @@ -4150,6 +4213,26 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"TaggableResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the resource for which you want to retrieve tags.
" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"A map of the key-value pairs for the tag or tags assigned to the specified resource.
" + } + } + }, "ListTasksSortBase":{ "type":"structure", "required":[ @@ -4731,12 +4814,12 @@ "EngagementConflict", "OpportunitySubmissionFailed", "EngagementInvitationConflict", + "InternalError", "OpportunityValidationFailed", "OpportunityConflict", "ResourceSnapshotAccessDenied", "ResourceSnapshotValidationFailed", "ResourceSnapshotConflict", - "InternalError", "ServiceQuotaExceeded", "RequestThrottled" ] @@ -4877,7 +4960,7 @@ }, "EngagementId":{ "shape":"EngagementIdentifier", - "documentation":"The unique identifier for the engagement within the AWS Partner Central system. This ID is used for direct references to the engagement within the service.
" + "documentation":"The unique identifier of the Engagement.
" }, "Id":{ "shape":"ResourceSnapshotJobIdentifier", @@ -4885,7 +4968,7 @@ }, "Status":{ "shape":"ResourceSnapshotJobStatus", - "documentation":"Represents the current status of the resource snapshot job.
" + "documentation":"The current status of the snapshot job.
Valid values:
STOPPED: The job is not currently running.
RUNNING: The job is actively executing.
An object that contains a Resource Snapshot Job's subset of fields.
The AWS account ID of the principal (user or role) who created the snapshot. This helps in tracking the origin of the snapshot.
" + "documentation":"The AWS account ID of the entity that owns the resource from which the snapshot was created.
" }, "ResourceId":{ "shape":"ResourceIdentifier", @@ -4935,7 +5018,7 @@ }, "Revision":{ "shape":"ResourceSnapshotRevision", - "documentation":"The revision number of the snapshot. This integer value is incremented each time the snapshot is updated, allowing for version tracking of the resource snapshot.
" + "documentation":"The revision number of the snapshot. This integer value is incremented each time the snapshot is updated, allowing for version tracking of the resource snapshot.
" } }, "documentation":"Provides a concise summary of a resource snapshot, including its unique identifier and version information. This structure is used to quickly reference and identify specific versions of resource snapshots.
" @@ -5216,6 +5299,10 @@ "Identifier":{ "shape":"EngagementInvitationArnOrIdentifier", "documentation":"Specifies the unique identifier of the EngagementInvitation
to be accepted. Providing the correct identifier helps ensure that the correct engagement is processed.
A list of objects specifying each tag name and value.
" } } }, @@ -5245,7 +5332,7 @@ }, "ResourceSnapshotJobId":{ "shape":"ResourceSnapshotJobIdentifier", - "documentation":"The identifier of the resource snapshot job created as part of this task.
" + "documentation":"The identifier of the Resource Snapshot Job created as part of this task.
" }, "StartTime":{ "shape":"DateTime", @@ -5287,6 +5374,10 @@ "Identifier":{ "shape":"OpportunityIdentifier", "documentation":"The unique identifier of the opportunity from which the engagement task is to be initiated. This helps ensure that the task is applied to the correct opportunity.
" + }, + "Tags":{ + "shape":"TagList", + "documentation":"A list of objects specifying each tag name and value.
" } } }, @@ -5300,11 +5391,11 @@ "members":{ "EngagementId":{ "shape":"EngagementIdentifier", - "documentation":"The identifier of the newly created engagement. Only populated if TaskStatus is COMPLETE.
" + "documentation":"The identifier of the newly created Engagement. Only populated if TaskStatus is COMPLETE.
" }, "EngagementInvitationId":{ "shape":"EngagementInvitationIdentifier", - "documentation":"The identifier of the new engagement invitation. Only populated if TaskStatus is COMPLETE.
" + "documentation":"The identifier of the new Engagement invitation. Only populated if TaskStatus is COMPLETE.
" }, "Message":{ "shape":"String", @@ -5320,7 +5411,7 @@ }, "ResourceSnapshotJobId":{ "shape":"ResourceSnapshotJobIdentifier", - "documentation":"The identifier of the resource snapshot job created to add the opportunity resource snapshot to the Engagement. Only populated if TaskStatus is COMPLETE.
" + "documentation":"The identifier of the resource snapshot job created to add the opportunity resource snapshot to the Engagement. Only populated if TaskStatus is COMPLETE
" }, "StartTime":{ "shape":"DateTime", @@ -5349,11 +5440,11 @@ "members":{ "Catalog":{ "shape":"CatalogIdentifier", - "documentation":"Specifies the catalog related to the request.
" + "documentation":"Specifies the catalog related to the request. Valid values are:
AWS: Starts the request from the production AWS environment.
Sandbox: Starts the request from a sandbox environment used for testing or development purposes.
The identifier of the resource snapshot job to start.
" + "documentation":"The identifier of the resource snapshot job to start.
" } } }, @@ -5366,11 +5457,11 @@ "members":{ "Catalog":{ "shape":"CatalogIdentifier", - "documentation":"Specifies the catalog related to the request.
" + "documentation":"Specifies the catalog related to the request. Valid values are:
AWS: Stops the request from the production AWS environment.
Sandbox: Stops the request from a sandbox environment used for testing or development purposes.
The identifier of the job to stop.
" + "documentation":"The identifier of the job to stop.
" } } }, @@ -5385,22 +5476,92 @@ "members":{ "Catalog":{ "shape":"CatalogIdentifier", - "documentation":"Specifies the catalog related to the request.
" + "documentation":"Specifies the catalog related to the request. Valid values are:
AWS: Submits the opportunity request from the production AWS environment.
Sandbox: Submits the opportunity request from a sandbox environment used for testing or development purposes.
The identifier of the opportunity previously created by partner and needs to be submitted.
" + "documentation":"The identifier of the Opportunity previously created by partner and needs to be submitted.
" }, "InvolvementType":{ "shape":"SalesInvolvementType", - "documentation":"Specifies the level of AWS sellers' involvement on the opportunity.
" + "documentation":"Specifies the level of AWS sellers' involvement on the opportunity. Valid values:
Co-sell: Indicates the user wants to co-sell with AWS. Share the opportunity with AWS to receive deal assistance and support.
For Visibility Only: Indicates that the user does not need support from an AWS Sales Rep. Share this opportunity with AWS for visibility only; you will not receive deal assistance and support.
Determines whether to restrict visibility of the opportunity from AWS sales. Default value is Full.
" + "documentation":"Determines whether to restrict visibility of the opportunity from AWS sales. Default value is Full. Valid values:
Full: The opportunity is fully visible to AWS sales.
Limited: The opportunity has restricted visibility to AWS sales.
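To make the InvolvementType and Visibility combinations above concrete, here is a minimal boto3 sketch of submitting an opportunity; the client name, the snake_case method name, and the Identifier parameter name are assumptions based on this model, and all values are placeholders.

    import boto3

    client = boto3.client("partnercentral-selling")  # assumed boto3 service name

    # Submit a previously created opportunity for co-selling with AWS (placeholder values).
    client.submit_opportunity(
        Catalog="Sandbox",             # "AWS" for production submissions
        Identifier="O1234567",         # hypothetical Opportunity identifier
        InvolvementType="Co-sell",     # or "For Visibility Only", as described above
        Visibility="Full",             # or "Limited" to restrict AWS sales visibility
    )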
The key in the tag.
" + }, + "Value":{ + "shape":"TagValue", + "documentation":"The value in the tag.
" + } + }, + "documentation":"The key-value pair assigned to a specified resource.
" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"TaggableResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the resource that you want to tag.
" + }, + "Tags":{ + "shape":"TagList", + "documentation":"A map of the key-value pairs of the tag or tags to assign to the resource.
" } } }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TaggableResourceArn":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^arn:[\\w+=/,.@-]+:partnercentral:[\\w+=/,.@-]*:[0-9]{12}:catalog/([a-zA-Z]+)/[\\w+=,.@-]+(/[\\w+=,.@-]+)*$" + }, "TaskArn":{ "type":"string", "pattern":"^arn:.*" @@ -5441,6 +5602,28 @@ "documentation":"This error occurs when there are too many requests sent. Review the provided quotas and adapt your usage to avoid throttling.
This error occurs when there are too many requests sent. Review the provided Quotas and retry after the provided delay.
", "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"TaggableResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the resource that you want to untag.
" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"The keys of the key-value pairs for the tag or tags you want to remove from the specified resource.
" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateOpportunityRequest":{ "type":"structure", "required":[ @@ -5487,7 +5670,7 @@ }, "PrimaryNeedsFromAws":{ "shape":"PrimaryNeedsFromAws", - "documentation":"Identifies the type of support the partner needs from Amazon Web Services.
Valid values:
Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks.
Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation.
Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution.
Cosell—Pricing Assistance: Connect with an AWS seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals).
Cosell—Technical Consultation: Connection with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution.
Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment.
Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning).
Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs RFx support from Amazon Web Services.
Do Not Need Support from AWS Sales Rep: Indicates that a partner doesn't need support from an Amazon Web Services Sales representative. The opportunity is managed solely by the partner. It's possible to request coselling support on these opportunities at any stage during their lifecycle. Also known as, for-visibility-only (FVO) opportunity.
Identifies the type of support the partner needs from Amazon Web Services.
Valid values:
Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks.
Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation.
Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution.
Cosell—Pricing Assistance: Connect with an AWS seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals).
Cosell—Technical Consultation: Connection with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution.
Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment.
Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning).
Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs RFx support from Amazon Web Services.
Adds or updates a permission policy for a Q Business application, allowing cross-account access for an ISV. This operation creates a new policy statement for the specified Q Business application. The policy statement defines the IAM actions that the ISV is allowed to perform on the Q Business application's resources.
" + "documentation":"Adds or updates a permission policy for a Amazon Q Business application, allowing cross-account access for an ISV. This operation creates a new policy statement for the specified Amazon Q Business application. The policy statement defines the IAM actions that the ISV is allowed to perform on the Amazon Q Business application's resources.
" }, "BatchDeleteDocument":{ "name":"BatchDeleteDocument", @@ -73,6 +73,25 @@ ], "documentation":"Adds one or more documents to an Amazon Q Business index.
You use this API to:
ingest your structured and unstructured documents and documents stored in an Amazon S3 bucket into an Amazon Q Business index.
add custom attributes to documents in an Amazon Q Business index.
attach an access control list to the documents added to an Amazon Q Business index.
You can see the progress of the deletion, and any error messages related to the process, by using CloudWatch.
" }, + "CancelSubscription":{ + "name":"CancelSubscription", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{applicationId}/subscriptions/{subscriptionId}", + "responseCode":200 + }, + "input":{"shape":"CancelSubscriptionRequest"}, + "output":{"shape":"CancelSubscriptionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Unsubscribes a user or a group from their pricing tier in an Amazon Q Business application. An unsubscribed user or group loses all Amazon Q Business feature access at the start of next month.
", + "idempotent":true + }, "Chat":{ "name":"Chat", "http":{ @@ -154,7 +173,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"Creates a new data accessor for an ISV to access data from a Q Business application. The data accessor is an entity that represents the ISV's access to the Q Business application's data. It includes the IAM role ARN for the ISV, a friendly name, and a set of action configurations that define the specific actions the ISV is allowed to perform and any associated data filters. When the data accessor is created, an AWS IAM Identity Center application is also created to manage the ISV's identity and authentication for accessing the Q Business application.
", + "documentation":"Creates a new data accessor for an ISV to access data from a Amazon Q Business application. The data accessor is an entity that represents the ISV's access to the Amazon Q Business application's data. It includes the IAM role ARN for the ISV, a friendly name, and a set of action configurations that define the specific actions the ISV is allowed to perform and any associated data filters. When the data accessor is created, an IAM Identity Center application is also created to manage the ISV's identity and authentication for accessing the Amazon Q Business application.
", "idempotent":true }, "CreateDataSource":{ @@ -239,6 +258,26 @@ ], "documentation":"Adds a retriever to your Amazon Q Business application.
" }, + "CreateSubscription":{ + "name":"CreateSubscription", + "http":{ + "method":"POST", + "requestUri":"/applications/{applicationId}/subscriptions", + "responseCode":200 + }, + "input":{"shape":"CreateSubscriptionRequest"}, + "output":{"shape":"CreateSubscriptionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Subscribes an IAM Identity Center user or a group to a pricing tier for an Amazon Q Business application.
Amazon Q Business offers two subscription tiers: Q_LITE and Q_BUSINESS. Subscription tier determines feature access for the user. For more information on subscriptions and pricing tiers, see Amazon Q Business pricing.
Deletes a specified data accessor. This operation permanently removes the data accessor and its associated AWS IAM Identity Center application. Any access granted to the ISV through this data accessor will be revoked
", + "documentation":"Deletes a specified data accessor. This operation permanently removes the data accessor and its associated IAM Identity Center application. Any access granted to the ISV through this data accessor will be revoked.
", "idempotent":true }, "DeleteDataSource":{ @@ -516,7 +555,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Removes a permission policy from a Q Business application, revoking the cross-account access that was previously granted to an ISV. This operation deletes the specified policy statement from the application's permission policy.
", + "documentation":"Removes a permission policy from a Amazon Q Business application, revoking the cross-account access that was previously granted to an ISV. This operation deletes the specified policy statement from the application's permission policy.
", "idempotent":true }, "GetApplication":{ @@ -571,7 +610,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Retrieves information about a specified data accessor. This operation returns details about the data accessor, including its display name, unique identifier, Amazon Resource Name (ARN), the associated Q Business application and AWS IAM Identity Center application, the IAM role for the ISV, the action configurations, and the timestamps for when the data accessor was created and last updated.
" + "documentation":"Retrieves information about a specified data accessor. This operation returns details about the data accessor, including its display name, unique identifier, Amazon Resource Name (ARN), the associated Amazon Q Business application and IAM Identity Center application, the IAM role for the ISV, the action configurations, and the timestamps for when the data accessor was created and last updated.
" }, "GetDataSource":{ "name":"GetDataSource", @@ -682,7 +721,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Retrieves the current permission policy for a Q Business application. The policy is returned as a JSON-formatted string and defines the IAM actions that are allowed or denied for the application's resources.
" + "documentation":"Retrieves the current permission policy for a Amazon Q Business application. The policy is returned as a JSON-formatted string and defines the IAM actions that are allowed or denied for the application's resources.
" }, "GetRetriever":{ "name":"GetRetriever", @@ -810,7 +849,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Lists the data accessors for a Q Business application. This operation returns a paginated list of data accessor summaries, including the friendly name, unique identifier, ARN, associated IAM role, and creation/update timestamps for each data accessor.
" + "documentation":"Lists the data accessors for a Amazon Q Business application. This operation returns a paginated list of data accessor summaries, including the friendly name, unique identifier, ARN, associated IAM role, and creation/update timestamps for each data accessor.
" }, "ListDataSourceSyncJobs":{ "name":"ListDataSourceSyncJobs", @@ -1011,6 +1050,25 @@ ], "documentation":"Lists the retriever used by an Amazon Q Business application.
" }, + "ListSubscriptions":{ + "name":"ListSubscriptions", + "http":{ + "method":"GET", + "requestUri":"/applications/{applicationId}/subscriptions", + "responseCode":200 + }, + "input":{"shape":"ListSubscriptionsRequest"}, + "output":{"shape":"ListSubscriptionsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Lists all subscriptions created in an Amazon Q Business application.
" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -1082,7 +1140,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"Create, or updates, a mapping of users—who have access to a document—to groups.
You can also map sub groups to groups. For example, the group \"Company Intellectual Property Teams\" includes sub groups \"Research\" and \"Engineering\". These sub groups include their own list of users or people who work in these teams. Only users who work in research and engineering, and therefore belong in the intellectual property group, can see top-secret company documents in their Amazon Q Business chat results.
", + "documentation":"Create, or updates, a mapping of users—who have access to a document—to groups.
You can also map sub groups to groups. For example, the group \"Company Intellectual Property Teams\" includes sub groups \"Research\" and \"Engineering\". These sub groups include their own list of users or people who work in these teams. Only users who work in research and engineering, and therefore belong in the intellectual property group, can see top-secret company documents in their Amazon Q Business chat results.
There are two options for creating groups, either passing group members inline or using an S3 file via the S3PathForGroupMembers field. For inline groups, there is a limit of 1000 members per group and for provided S3 files there is a limit of 100 thousand members. When creating a group using an S3 file, you provide both an S3 file and a RoleArn
for Amazon Q Business to access the file.
Searches for relevant content in a Q Business application based on a query. This operation takes a search query text, the Q Business application identifier, and optional filters (such as content source and maximum results) as input. It returns a list of relevant content items, where each item includes the content text, the unique document identifier, the document title, the document URI, any relevant document attributes, and score attributes indicating the confidence level of the relevance.
" + "documentation":"Searches for relevant content in a Amazon Q Business application based on a query. This operation takes a search query text, the Amazon Q Business application identifier, and optional filters (such as content source and maximum results) as input. It returns a list of relevant content items, where each item includes the content text, the unique document identifier, the document title, the document URI, any relevant document attributes, and score attributes indicating the confidence level of the relevance.
" }, "StartDataSourceSyncJob":{ "name":"StartDataSourceSyncJob", @@ -1326,6 +1384,26 @@ "documentation":"Updates the retriever used for your Amazon Q Business application.
", "idempotent":true }, + "UpdateSubscription":{ + "name":"UpdateSubscription", + "http":{ + "method":"PUT", + "requestUri":"/applications/{applicationId}/subscriptions/{subscriptionId}", + "responseCode":200 + }, + "input":{"shape":"UpdateSubscriptionRequest"}, + "output":{"shape":"UpdateSubscriptionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Updates the pricing tier for an Amazon Q Business subscription. Upgrades are instant. Downgrades apply at the start of the next month. Subscription tier determines feature access for the user. For more information on subscriptions and pricing tiers, see Amazon Q Business pricing.
", + "idempotent":true + }, "UpdateUser":{ "name":"UpdateUser", "http":{ @@ -1440,7 +1518,7 @@ "members":{ "action":{ "shape":"QIamAction", - "documentation":"The Q Business action that is allowed.
" + "documentation":"The Amazon Q Business action that is allowed.
" }, "filterConfiguration":{ "shape":"ActionFilterConfiguration", @@ -1807,7 +1885,7 @@ "members":{ "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application.
", + "documentation":"The unique identifier of the Amazon Q Business application.
", "location":"uri", "locationName":"applicationId" }, @@ -1817,11 +1895,11 @@ }, "actions":{ "shape":"QIamActions", - "documentation":"The list of Q Business actions that the ISV is allowed to perform.
" + "documentation":"The list of Amazon Q Business actions that the ISV is allowed to perform.
" }, "principal":{ "shape":"PrincipalRoleArn", - "documentation":"The Amazon Resource Name (ARN) of the IAM role for the ISV that is being granted permission.
" + "documentation":"The Amazon Resource Name of the IAM role for the ISV that is being granted permission.
" } } }, @@ -2303,6 +2381,44 @@ "max":2, "min":0 }, + "CancelSubscriptionRequest":{ + "type":"structure", + "required":[ + "applicationId", + "subscriptionId" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"The identifier of the Amazon Q Business application for which the subscription is being cancelled.
", + "location":"uri", + "locationName":"applicationId" + }, + "subscriptionId":{ + "shape":"SubscriptionId", + "documentation":"The identifier of the Amazon Q Business subscription being cancelled.
", + "location":"uri", + "locationName":"subscriptionId" + } + } + }, + "CancelSubscriptionResponse":{ + "type":"structure", + "members":{ + "subscriptionArn":{ + "shape":"SubscriptionArn", + "documentation":"The Amazon Resource Name (ARN) of the Amazon Q Business subscription being cancelled.
" + }, + "currentSubscription":{ + "shape":"SubscriptionDetails", + "documentation":"The type of your current Amazon Q Business subscription.
" + }, + "nextSubscription":{ + "shape":"SubscriptionDetails", + "documentation":"The type of the Amazon Q Business subscription for the next month.
" + } + } + }, "ChatInput":{ "type":"structure", "required":["applicationId"], @@ -2795,7 +2911,7 @@ "members":{ "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application.
", + "documentation":"The unique identifier of the Amazon Q Business application.
", "location":"uri", "locationName":"applicationId" }, @@ -2836,7 +2952,7 @@ }, "idcApplicationArn":{ "shape":"IdcApplicationArn", - "documentation":"The Amazon Resource Name (ARN) of the AWS IAM Identity Center application created for this data accessor.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM Identity Center application created for this data accessor.
" }, "dataAccessorArn":{ "shape":"DataAccessorArn", @@ -3083,6 +3199,56 @@ } } }, + "CreateSubscriptionRequest":{ + "type":"structure", + "required":[ + "applicationId", + "principal", + "type" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"The identifier of the Amazon Q Business application the subscription should be added to.
", + "location":"uri", + "locationName":"applicationId" + }, + "principal":{ + "shape":"SubscriptionPrincipal", + "documentation":"The IAM Identity Center UserId
or GroupId of a user or group in the IAM Identity Center instance connected to the Amazon Q Business application.
The type of Amazon Q Business subscription you want to create.
" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"A token that you provide to identify the request to create a subscription for your Amazon Q Business application.
", + "idempotencyToken":true + } + } + }, + "CreateSubscriptionResponse":{ + "type":"structure", + "members":{ + "subscriptionId":{ + "shape":"SubscriptionId", + "documentation":"The identifier of the Amazon Q Business subscription created.
" + }, + "subscriptionArn":{ + "shape":"SubscriptionArn", + "documentation":"The Amazon Resource Name (ARN) of the Amazon Q Business subscription created.
" + }, + "currentSubscription":{ + "shape":"SubscriptionDetails", + "documentation":"The type of your current Amazon Q Business subscription.
" + }, + "nextSubscription":{ + "shape":"SubscriptionDetails", + "documentation":"The type of the Amazon Q Business subscription for the next month.
" + } + } + }, "CreateUserRequest":{ "type":"structure", "required":[ @@ -3278,7 +3444,7 @@ }, "idcApplicationArn":{ "shape":"IdcApplicationArn", - "documentation":"The Amazon Resource Name (ARN) of the associated AWS IAM Identity Center application.
" + "documentation":"The Amazon Resource Name (ARN) of the associated IAM Identity Center application.
" }, "principal":{ "shape":"PrincipalRoleArn", @@ -3584,7 +3750,7 @@ "members":{ "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application.
", + "documentation":"The unique identifier of the Amazon Q Business application.
", "location":"uri", "locationName":"applicationId" }, @@ -3833,7 +3999,7 @@ "members":{ "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application.
", + "documentation":"The unique identifier of the Amazon Q Business application.
", "location":"uri", "locationName":"applicationId" }, @@ -4459,7 +4625,7 @@ "members":{ "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application.
", + "documentation":"The unique identifier of the Amazon Q Business application.
", "location":"uri", "locationName":"applicationId" }, @@ -4488,11 +4654,11 @@ }, "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application associated with this data accessor.
" + "documentation":"The unique identifier of the Amazon Q Business application associated with this data accessor.
" }, "idcApplicationArn":{ "shape":"IdcApplicationArn", - "documentation":"The Amazon Resource Name (ARN) of the AWS IAM Identity Center application associated with this data accessor.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM Identity Center application associated with this data accessor.
" }, "principal":{ "shape":"PrincipalRoleArn", @@ -4860,7 +5026,7 @@ "members":{ "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application.
", + "documentation":"The unique identifier of the Amazon Q Business application.
", "location":"uri", "locationName":"applicationId" } @@ -5068,6 +5234,12 @@ } } }, + "GroupIdentifier":{ + "type":"string", + "max":47, + "min":1, + "pattern":"([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}" + }, "GroupMembers":{ "type":"structure", "members":{ @@ -5532,7 +5704,7 @@ "members":{ "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application.
", + "documentation":"The unique identifier of the Amazon Q Business application.
", "location":"uri", "locationName":"applicationId" }, @@ -6071,6 +6243,43 @@ } } }, + "ListSubscriptionsRequest":{ + "type":"structure", + "required":["applicationId"], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"The identifier of the Amazon Q Business application linked to the subscription.
", + "location":"uri", + "locationName":"applicationId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"If the maxResults
response was incomplete because there is more data to retrieve, Amazon Q Business returns a pagination token in the response. You can use this pagination token to retrieve the next set of Amazon Q Business subscriptions.
The maximum number of Amazon Q Business subscriptions to return.
", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListSubscriptionsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"If the response is truncated, Amazon Q Business returns this token. You can use this token in a subsequent request to retrieve the next set of subscriptions.
" + }, + "subscriptions":{ + "shape":"Subscriptions", + "documentation":"An array of summary information on the subscriptions configured for an Amazon Q Business application.
" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceARN"], @@ -6239,6 +6448,12 @@ "max":50, "min":1 }, + "MaxResultsIntegerForListSubscriptions":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, "MaxResultsIntegerForListWebExperiencesRequest":{ "type":"integer", "box":true, @@ -6903,7 +7118,7 @@ "groupMembers":{"shape":"GroupMembers"}, "roleArn":{ "shape":"RoleArn", - "documentation":"The Amazon Resource Name (ARN) of an IAM role that has access to the S3 file that contains your list of users that belong to a group.The Amazon Resource Name (ARN) of an IAM role that has access to the S3 file that contains your list of users that belong to a group.
" + "documentation":"The Amazon Resource Name (ARN) of an IAM role that has access to the S3 file that contains your list of users that belong to a group.
" } } }, @@ -7285,7 +7500,7 @@ "members":{ "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application to search.
", + "documentation":"The unique identifier of the Amazon Q Business application to search.
", "location":"uri", "locationName":"applicationId" }, @@ -7558,6 +7773,68 @@ "type":"list", "member":{"shape":"SubnetId"} }, + "Subscription":{ + "type":"structure", + "members":{ + "subscriptionId":{ + "shape":"SubscriptionId", + "documentation":"The identifier of the Amazon Q Business subscription to be updated.
" + }, + "subscriptionArn":{ + "shape":"SubscriptionArn", + "documentation":"The Amazon Resource Name (ARN) of the Amazon Q Business subscription that was updated.
" + }, + "principal":{ + "shape":"SubscriptionPrincipal", + "documentation":"The IAM Identity Center UserId
or GroupId
of a user or group in the IAM Identity Center instance connected to the Amazon Q Business application.
The type of your current Amazon Q Business subscription.
" + }, + "nextSubscription":{ + "shape":"SubscriptionDetails", + "documentation":"The type of the Amazon Q Business subscription for the next month.
" + } + }, + "documentation":"Information about an Amazon Q Business subscription.
Subscriptions are used to provide access for an IAM Identity Center user or a group to an Amazon Q Business application.
Amazon Q Business offers two subscription tiers: Q_LITE
and Q_BUSINESS
. Subscription tier determines feature access for the user. For more information on subscriptions and pricing tiers, see Amazon Q Business pricing.
The type of an Amazon Q Business subscription.
" + } + }, + "documentation":"The details of an Amazon Q Business subscription.
" + }, + "SubscriptionId":{ + "type":"string", + "max":1224, + "min":0 + }, + "SubscriptionPrincipal":{ + "type":"structure", + "members":{ + "user":{ + "shape":"UserIdentifier", + "documentation":"The identifier of a user in the IAM Identity Center instance connected to the Amazon Q Business application.
" + }, + "group":{ + "shape":"GroupIdentifier", + "documentation":"The identifier of a group in the IAM Identity Center instance connected to the Amazon Q Business application.
" + } + }, + "documentation":"A user or group in the IAM Identity Center instance connected to the Amazon Q Business application.
", + "union":true + }, "SubscriptionType":{ "type":"string", "enum":[ @@ -7565,6 +7842,10 @@ "Q_BUSINESS" ] }, + "Subscriptions":{ + "type":"list", + "member":{"shape":"Subscription"} + }, "SyncSchedule":{ "type":"string", "max":998, @@ -7916,7 +8197,7 @@ "members":{ "applicationId":{ "shape":"ApplicationId", - "documentation":"The unique identifier of the Q Business application.
", + "documentation":"The unique identifier of the Amazon Q Business application.
", "location":"uri", "locationName":"applicationId" }, @@ -8120,6 +8401,49 @@ "members":{ } }, + "UpdateSubscriptionRequest":{ + "type":"structure", + "required":[ + "applicationId", + "subscriptionId", + "type" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"The identifier of the Amazon Q Business application where the subscription update should take effect.
", + "location":"uri", + "locationName":"applicationId" + }, + "subscriptionId":{ + "shape":"SubscriptionId", + "documentation":"The identifier of the Amazon Q Business subscription to be updated.
", + "location":"uri", + "locationName":"subscriptionId" + }, + "type":{ + "shape":"SubscriptionType", + "documentation":"The type of the Amazon Q Business subscription to be updated.
" + } + } + }, + "UpdateSubscriptionResponse":{ + "type":"structure", + "members":{ + "subscriptionArn":{ + "shape":"SubscriptionArn", + "documentation":"The Amazon Resource Name (ARN) of the Amazon Q Business subscription that was updated.
" + }, + "currentSubscription":{ + "shape":"SubscriptionDetails", + "documentation":"The type of your current Amazon Q Business subscription.
" + }, + "nextSubscription":{ + "shape":"SubscriptionDetails", + "documentation":"The type of the Amazon Q Business subscription for the next month.
" + } + } + }, "UpdateUserRequest":{ "type":"structure", "required":[ @@ -8273,6 +8597,12 @@ "min":1, "pattern":"\\P{C}*" }, + "UserIdentifier":{ + "type":"string", + "max":47, + "min":1, + "pattern":"([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}" + }, "UserIds":{ "type":"list", "member":{"shape":"String"} diff --git a/botocore/data/quicksight/2018-04-01/service-2.json b/botocore/data/quicksight/2018-04-01/service-2.json index 2668fd01ea..fe9f13996b 100644 --- a/botocore/data/quicksight/2018-04-01/service-2.json +++ b/botocore/data/quicksight/2018-04-01/service-2.json @@ -16920,6 +16920,13 @@ }, "documentation":"The configuration of destination parameter values.
This is a union type structure. For this structure to be valid, only one of the attributes can be defined.
" }, + "DigitGroupingStyle":{ + "type":"string", + "enum":[ + "DEFAULT", + "LAKHS" + ] + }, "DimensionField":{ "type":"structure", "members":{ @@ -25217,7 +25224,9 @@ "THOUSANDS", "MILLIONS", "BILLIONS", - "TRILLIONS" + "TRILLIONS", + "LAKHS", + "CRORES" ] }, "NumericAxisOptions":{ @@ -31878,7 +31887,7 @@ "TableFieldOptionList":{ "type":"list", "member":{"shape":"TableFieldOption"}, - "max":100 + "max":201 }, "TableFieldOptions":{ "type":"structure", @@ -32089,11 +32098,16 @@ "SCROLLED" ] }, + "TableUnaggregatedFieldList":{ + "type":"list", + "member":{"shape":"UnaggregatedField"}, + "max":201 + }, "TableUnaggregatedFieldWells":{ "type":"structure", "members":{ "Values":{ - "shape":"UnaggregatedFieldList", + "shape":"TableUnaggregatedFieldList", "documentation":"The values field well for a pivot table. Values are unaggregated for an unaggregated table.
" } }, @@ -32846,6 +32860,10 @@ "Visibility":{ "shape":"Visibility", "documentation":"Determines the visibility of the thousands separator.
" + }, + "GroupingStyle":{ + "shape":"DigitGroupingStyle", + "documentation":"Determines the way numbers are styled to accommodate different readability standards. The DEFAULT
value uses the standard international grouping system and groups numbers by the thousands. The LAKHS
value uses the Indian numbering system and groups numbers by lakhs and crores.
The options that determine the thousands separator configuration.
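To illustrate the new DigitGroupingStyle enum: GroupingStyle sits alongside Visibility in a ThousandSeparatorOptions value wherever a numeric display format is configured in an analysis, template, or dashboard definition. A hedged sketch of such a value (the nesting around ThousandSeparatorOptions is not shown and depends on the surrounding format configuration):

# ThousandSeparatorOptions with Indian-style digit grouping (lakhs and crores).
thousand_separator_options = {
    "Visibility": "VISIBLE",
    "GroupingStyle": "LAKHS",  # "DEFAULT" keeps standard thousands grouping
}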
" diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index f9b93f2fae..3b5e379f81 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -2965,7 +2965,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"InvalidDBClusterStateFault"} ], - "documentation":"Stops an Amazon RDS DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary.
For more information, see Stopping an Amazon RDS DB Instance Temporarily in the Amazon RDS User Guide.
This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL. For Aurora clusters, use StopDBCluster
instead.
Stops an Amazon RDS DB instance temporarily. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary. The instance restarts automatically after 7 days.
For more information, see Stopping an Amazon RDS DB Instance Temporarily in the Amazon RDS User Guide.
This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL. For Aurora clusters, use StopDBCluster
instead.
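The revised wording above documents that a stopped instance now restarts automatically after 7 days. A minimal sketch of the call it describes (the instance identifier is a placeholder):

import boto3

rds = boto3.client("rds")
# Temporarily stop an RDS DB instance; RDS restarts it automatically after 7 days.
rds.stop_db_instance(DBInstanceIdentifier="my-db-instance")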
The list of log types that need to be enabled for exporting to CloudWatch Logs.
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
The following values are valid for each DB engine:
Aurora MySQL - audit | error | general | slowquery
Aurora PostgreSQL - postgresql
RDS for MySQL - error | general | slowquery
RDS for PostgreSQL - postgresql | upgrade
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
" + "documentation":"The list of log types that need to be enabled for exporting to CloudWatch Logs.
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
The following values are valid for each DB engine:
Aurora MySQL - audit | error | general | instance | slowquery
Aurora PostgreSQL - instance | postgresql
RDS for MySQL - error | general | slowquery
RDS for PostgreSQL - postgresql | upgrade
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
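To show the newly documented instance log type for Aurora engines, a hedged sketch of enabling it at cluster creation; this field belongs to the cluster-creation request shape, and identifiers and credentials are placeholders:

import boto3

rds = boto3.client("rds")
rds.create_db_cluster(
    DBClusterIdentifier="my-aurora-cluster",           # placeholder
    Engine="aurora-mysql",
    MasterUsername="admin",                            # placeholder
    MasterUserPassword="replace-with-a-real-secret",   # placeholder
    EnableCloudwatchLogsExports=["audit", "error", "general", "instance", "slowquery"],
)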
" }, "EngineMode":{ "shape":"String", @@ -4665,7 +4665,7 @@ }, "StorageThroughput":{ "shape":"IntegerOptional", - "documentation":"The storage throughput value for the DB instance.
This setting applies only to the gp3
storage type.
This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.
" + "documentation":"The storage throughput value, in mebibyte per second (MiBps), for the DB instance.
This setting applies only to the gp3
storage type.
This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.
" }, "ManageMasterUserPassword":{ "shape":"BooleanOptional", @@ -5596,7 +5596,7 @@ }, "CloneGroupId":{ "shape":"String", - "documentation":"The ID of the clone group with which the DB cluster is associated.
" + "documentation":"The ID of the clone group with which the DB cluster is associated. For newly created clusters, the ID is typically null.
If you clone a DB cluster when the ID is null, the operation populates the ID value for the source cluster and the clone because both clusters become part of the same clone group. Even if you delete the clone cluster, the clone group ID remains for the lifetime of the source cluster to show that it was used in a cloning operation.
For point-in-time restore (PITR) operations, the clone group ID is inherited from the source cluster. For snapshot restore operations, the clone group ID isn't inherited from the source cluster.
" }, "ClusterCreateTime":{ "shape":"TStamp", @@ -12335,7 +12335,7 @@ }, "CloudwatchLogsExportConfiguration":{ "shape":"CloudwatchLogsExportConfiguration", - "documentation":"The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster.
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
The following values are valid for each DB engine:
Aurora MySQL - audit | error | general | slowquery
Aurora PostgreSQL - postgresql
RDS for MySQL - error | general | slowquery
RDS for PostgreSQL - postgresql | upgrade
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
" + "documentation":"The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster.
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
The following values are valid for each DB engine:
Aurora MySQL - audit | error | general | instance | slowquery
Aurora PostgreSQL - instance | postgresql
RDS for MySQL - error | general | slowquery
RDS for PostgreSQL - postgresql | upgrade
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
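For an existing cluster, the same instance log type can be turned on through the configuration object described above; a minimal sketch (the cluster identifier is a placeholder):

import boto3

rds = boto3.client("rds")
rds.modify_db_cluster(
    DBClusterIdentifier="my-aurora-cluster",  # placeholder
    CloudwatchLogsExportConfiguration={
        "EnableLogTypes": ["instance"],
        "DisableLogTypes": [],
    },
    ApplyImmediately=True,
)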
" }, "EngineVersion":{ "shape":"String", @@ -14971,7 +14971,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.
Aurora MySQL
Possible values are audit
, error
, general
, and slowquery
.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
" + "documentation":"The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.
Aurora MySQL
Possible values are audit
, error
, general
, instance
, and slowquery
.
Aurora PostgreSQL
Possible values are instance
and postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
" }, "DeletionProtection":{ "shape":"BooleanOptional", @@ -15084,7 +15084,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used.
RDS for MySQL
Possible values are error
, general
, and slowquery
.
RDS for PostgreSQL
Possible values are postgresql
and upgrade
.
Aurora MySQL
Possible values are audit
, error
, general
, and slowquery
.
Aurora PostgreSQL
Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" + "documentation":"The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used.
RDS for MySQL
Possible values are error
, general
, and slowquery
.
RDS for PostgreSQL
Possible values are postgresql
and upgrade
.
Aurora MySQL
Possible values are audit
, error
, general
, instance
, and slowquery
.
Aurora PostgreSQL
Possible values are instance
and postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" }, "EngineMode":{ "shape":"String", @@ -15227,7 +15227,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.
RDS for MySQL
Possible values are error
, general
, and slowquery
.
RDS for PostgreSQL
Possible values are postgresql
and upgrade
.
Aurora MySQL
Possible values are audit
, error
, general
, and slowquery
.
Aurora PostgreSQL
Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" + "documentation":"The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.
RDS for MySQL
Possible values are error
, general
, and slowquery
.
RDS for PostgreSQL
Possible values are postgresql
and upgrade
.
Aurora MySQL
Possible values are audit
, error
, general
, instance
, and slowquery
.
Aurora PostgreSQL
Possible values are instance
and postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" }, "DBClusterParameterGroupName":{ "shape":"String", diff --git a/botocore/data/redshift/2012-12-01/service-2.json b/botocore/data/redshift/2012-12-01/service-2.json index 8de5a88329..d1a650cd5c 100644 --- a/botocore/data/redshift/2012-12-01/service-2.json +++ b/botocore/data/redshift/2012-12-01/service-2.json @@ -3271,7 +3271,7 @@ }, "PubliclyAccessible":{ "shape":"Boolean", - "documentation":"A boolean value that, if true
, indicates that the cluster can be accessed from a public network.
A boolean value that, if true
, indicates that the cluster can be accessed from a public network.
Default: false
" }, "Encrypted":{ "shape":"Boolean", @@ -4267,11 +4267,11 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"If true
, the cluster can be accessed from a public network.
If true
, the cluster can be accessed from a public network.
Default: false
" }, "Encrypted":{ "shape":"BooleanOptional", - "documentation":"If true
, the data in the cluster is encrypted at rest.
Default: false
" + "documentation":"If true
, the data in the cluster is encrypted at rest. If you set this parameter to false
, the request will fail.
Default: true
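Given the documented change (Encrypted now defaults to true and a request that sets it to false fails), callers of the operation backed by this request shape, most likely CreateCluster, can omit the parameter or pass it explicitly. A hedged sketch with placeholder values:

import boto3

redshift = boto3.client("redshift")
redshift.create_cluster(
    ClusterIdentifier="my-cluster",                    # placeholder
    NodeType="ra3.xlplus",
    MasterUsername="admin",                            # placeholder
    MasterUserPassword="replace-with-a-real-secret",   # placeholder
    Encrypted=True,  # omitting it has the same effect; Encrypted=False is rejected
)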
" }, "HsmClientCertificateIdentifier":{ "shape":"String", @@ -8622,7 +8622,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"If true
, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available.
If true
, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available.
Default: false
" }, "ElasticIp":{ "shape":"String", @@ -10385,7 +10385,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"If true
, the cluster can be accessed from a public network.
If true
, the cluster can be accessed from a public network.
Default: false
" }, "OwnerAccount":{ "shape":"String", diff --git a/botocore/data/route53/2013-04-01/service-2.json b/botocore/data/route53/2013-04-01/service-2.json index 0b640508e4..cc303fc322 100644 --- a/botocore/data/route53/2013-04-01/service-2.json +++ b/botocore/data/route53/2013-04-01/service-2.json @@ -1797,6 +1797,7 @@ "il-central-1", "ca-west-1", "ap-southeast-5", + "mx-central-1", "ap-southeast-7" ], "max":64, @@ -5564,6 +5565,7 @@ "il-central-1", "ca-west-1", "ap-southeast-5", + "mx-central-1", "ap-southeast-7" ], "max":64, @@ -6444,6 +6446,7 @@ "il-central-1", "ca-west-1", "ap-southeast-5", + "mx-central-1", "ap-southeast-7" ], "max":64, diff --git a/botocore/data/s3/2006-03-01/paginators-1.sdk-extras.json b/botocore/data/s3/2006-03-01/paginators-1.sdk-extras.json index 262275b6be..39e13606ef 100644 --- a/botocore/data/s3/2006-03-01/paginators-1.sdk-extras.json +++ b/botocore/data/s3/2006-03-01/paginators-1.sdk-extras.json @@ -37,7 +37,8 @@ "ChecksumAlgorithm", "Initiator", "Owner", - "StorageClass" + "StorageClass", + "ChecksumType" ] } } diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index 6890770d93..3ac1958d3f 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ b/botocore/data/s3/2006-03-01/service-2.json @@ -28,7 +28,7 @@ {"shape":"NoSuchUpload"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html", - "documentation":"This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload
operation to abort all the in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to AbortMultipartUpload
:
This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload
operation to abort all the in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to AbortMultipartUpload
:
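As the documentation above recommends, an abort should be followed by a ListParts check to confirm that no billable parts remain. A minimal sketch against the standard S3 client (bucket, key, and upload ID are placeholders):

import boto3

s3 = boto3.client("s3")
bucket, key, upload_id = "my-bucket", "large-object", "example-upload-id"  # placeholders

s3.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)

# Verify that nothing is left; a missing upload means all parts are gone.
try:
    parts = s3.list_parts(Bucket=bucket, Key=key, UploadId=upload_id).get("Parts", [])
except s3.exceptions.NoSuchUpload:
    parts = []
if parts:
    # In-progress part uploads may have landed after the abort; abort again.
    s3.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)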
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload
operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber
value and the ETag
value that are returned after that part was uploaded.
The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK
response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK
response has been sent. This means that a 200 OK
response can contain either a success or an error. The error response might be embedded in the 200 OK
response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
Note that if CompleteMultipartUpload
fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices.
You can't use Content-Type: application/x-www-form-urlencoded
for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type
header, CompleteMultipartUpload
can still return a 200 OK
response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
If you provide an additional checksum value in your MultipartUpload
requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt
action for the CompleteMultipartUpload
request to succeed.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Error Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to CompleteMultipartUpload
:
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload
operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber
value and the ETag
value that are returned after that part was uploaded.
The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK
response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK
response has been sent. This means that a 200 OK
response can contain either a success or an error. The error response might be embedded in the 200 OK
response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
Note that if CompleteMultipartUpload
fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices.
You can't use Content-Type: application/x-www-form-urlencoded
for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type
header, CompleteMultipartUpload
can still return a 200 OK
response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
If you provide an additional checksum value in your MultipartUpload
requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt
action for the CompleteMultipartUpload
request to succeed.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Error Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to CompleteMultipartUpload
:
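A compact sketch of the happy path described above: initiate, upload parts, then hand the collected PartNumber/ETag pairs back to CompleteMultipartUpload. Bucket and key are placeholders, and real code would also handle the embedded-error case the documentation calls out:

import boto3

s3 = boto3.client("s3")
bucket, key = "my-bucket", "assembled-object"  # placeholders

upload_id = s3.create_multipart_upload(Bucket=bucket, Key=key)["UploadId"]

parts = []
# Every part except the last must be at least 5 MB.
for number, chunk in enumerate([b"a" * 5 * 1024 * 1024, b"tail"], start=1):
    resp = s3.upload_part(Bucket=bucket, Key=key, UploadId=upload_id,
                          PartNumber=number, Body=chunk)
    parts.append({"PartNumber": number, "ETag": resp["ETag"]})

s3.complete_multipart_upload(
    Bucket=bucket, Key=key, UploadId=upload_id,
    MultipartUpload={"Parts": parts},  # must be in ascending PartNumber order
)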
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key can't be set to ReadOnly
on the copy destination bucket.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length
. You always need to read the entire response body to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK
response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK
status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to CopyObject
:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key can't be set to ReadOnly
on the copy destination bucket.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length
. You always need to read the entire response body to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK
response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK
status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to CopyObject
:
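A minimal sketch of the single-request copy described above, for an object at or under the 5 GB single-copy limit (bucket and key names are placeholders); larger objects need the multipart UploadPartCopy path instead:

import boto3

s3 = boto3.client("s3")
s3.copy_object(
    Bucket="destination-bucket",  # placeholder
    Key="copied-object",
    CopySource={"Bucket": "source-bucket", "Key": "original-object"},  # placeholder
)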
This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket
.
Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.
General purpose buckets - If you send your CreateBucket
request to the s3.amazonaws.com
global endpoint, the request goes to the us-east-1
Region. So the signature calculations in Signature Version 4 must use us-east-1
as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - In addition to the s3:CreateBucket
permission, the following permissions are required in a policy when your CreateBucket
request includes specific headers:
Access control lists (ACLs) - In your CreateBucket
request, if you specify an access control list (ACL) and set it to public-read
, public-read-write
, authenticated-read
, or if you explicitly specify any other custom ACLs, both s3:CreateBucket
and s3:PutBucketAcl
permissions are required. In your CreateBucket
request, if you set the ACL to private
, or if you don't specify any ACLs, only the s3:CreateBucket
permission is required.
Object Lock - In your CreateBucket
request, if you set x-amz-bucket-object-lock-enabled
to true, the s3:PutBucketObjectLockConfiguration
and s3:PutBucketVersioning
permissions are required.
S3 Object Ownership - If your CreateBucket
request includes the x-amz-object-ownership
header, then the s3:PutBucketOwnershipControls
permission is required.
To set an ACL on a bucket as part of a CreateBucket
request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced
. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl
to set the ACL. If you try to create a bucket with a public ACL, the request will fail.
For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock
API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateBucket
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.
For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to CreateBucket
:
This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket
.
Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.
General purpose buckets - If you send your CreateBucket
request to the s3.amazonaws.com
global endpoint, the request goes to the us-east-1
Region. So the signature calculations in Signature Version 4 must use us-east-1
as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirects. For more information, see Virtual hosting of buckets in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - In addition to the s3:CreateBucket
permission, the following permissions are required in a policy when your CreateBucket
request includes specific headers:
Access control lists (ACLs) - In your CreateBucket
request, if you specify an access control list (ACL) and set it to public-read
, public-read-write
, authenticated-read
, or if you explicitly specify any other custom ACLs, both s3:CreateBucket
and s3:PutBucketAcl
permissions are required. In your CreateBucket
request, if you set the ACL to private
, or if you don't specify any ACLs, only the s3:CreateBucket
permission is required.
Object Lock - In your CreateBucket
request, if you set x-amz-bucket-object-lock-enabled
to true, the s3:PutBucketObjectLockConfiguration
and s3:PutBucketVersioning
permissions are required.
S3 Object Ownership - If your CreateBucket
request includes the x-amz-object-ownership
header, then the s3:PutBucketOwnershipControls
permission is required.
To set an ACL on a bucket as part of a CreateBucket
request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced
. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl
to set the ACL. If you try to create a bucket with a public ACL, the request will fail.
For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock
API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateBucket
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.
For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to CreateBucket
:
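As a hedged illustration of the permission notes above, here is a minimal boto3 sketch (the bucket names and Region are placeholders) showing a plain CreateBucket call and a second call whose extra headers require the additional permissions described above:

import boto3

# Placeholder bucket names and Region; a minimal sketch, not a drop-in script.
s3 = boto3.client("s3", region_name="us-west-2")

# Plain CreateBucket: only the s3:CreateBucket permission is needed.
s3.create_bucket(
    Bucket="example-general-purpose-bucket",
    CreateBucketConfiguration={"LocationConstraint": "us-west-2"},
)

# CreateBucket with extra headers: enabling Object Lock also needs
# s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning, and the
# x-amz-object-ownership header needs s3:PutBucketOwnershipControls. A
# non-private ACL would additionally need s3:PutBucketAcl.
s3.create_bucket(
    Bucket="example-object-lock-bucket",
    CreateBucketConfiguration={"LocationConstraint": "us-west-2"},
    ObjectLockEnabledForBucket=True,
    ObjectOwnership="BucketOwnerEnforced",
)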
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
Directory buckets - S3 Lifecycle is not supported by directory buckets.
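The lifecycle rule mentioned above can be expressed with put_bucket_lifecycle_configuration; the following is a minimal boto3 sketch for a general purpose bucket, with the bucket name and the seven-day window as placeholder values:

import boto3

s3 = boto3.client("s3")

# Abort incomplete multipart uploads after a placeholder 7-day window; this
# applies to general purpose buckets only, since directory buckets don't
# support S3 Lifecycle.
s3.put_bucket_lifecycle_configuration(
    Bucket="example-general-purpose-bucket",
    LifecycleConfiguration={
        "Rules": [
            {
                "ID": "abort-incomplete-multipart-uploads",
                "Status": "Enabled",
                "Filter": {"Prefix": ""},
                "AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 7},
            }
        ]
    },
)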
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.
General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service (KMS) key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket.
When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key or customer-provided key to encrypt the uploaded parts.
When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload
request.
Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3
) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms
, but don't provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key (aws/s3
key) in KMS to protect the data.
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey*
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role.
All GET
and PUT
requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.
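In boto3, those SSE-KMS headers map to the ServerSideEncryption and SSEKMSKeyId parameters (and optionally SSEKMSEncryptionContext). The sketch below is a hedged illustration; the bucket, key, and KMS key ARN are placeholders, and omitting SSEKMSKeyId falls back to the aws/s3 managed key as described above:

import boto3

s3 = boto3.client("s3")

# Placeholder bucket, object key, and KMS key ARN.
response = s3.create_multipart_upload(
    Bucket="example-bucket",
    Key="reports/large-object.bin",
    ServerSideEncryption="aws:kms",  # x-amz-server-side-encryption
    SSEKMSKeyId="arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID",  # x-amz-server-side-encryption-aws-kms-key-id
    # x-amz-server-side-encryption-context can be supplied via
    # SSEKMSEncryptionContext as base64-encoded JSON (optional).
)
upload_id = response["UploadId"]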
Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide.
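A minimal SSE-C sketch in boto3, under the assumption (placeholder names) that you manage the key yourself and re-send the same key on the related UploadPart and CompleteMultipartUpload calls; boto3 normally base64-encodes the customer key and derives the key-MD5 header for you, so only the algorithm and key are passed here:

import os
import boto3

s3 = boto3.client("s3")

# Placeholder bucket/key names; store the 256-bit key yourself, because
# Amazon S3 does not keep a copy of customer-provided keys.
customer_key = os.urandom(32)

response = s3.create_multipart_upload(
    Bucket="example-bucket",
    Key="data/encrypted-object.bin",
    SSECustomerAlgorithm="AES256",  # x-amz-server-side-encryption-customer-algorithm
    SSECustomerKey=customer_key,    # x-amz-server-side-encryption-customer-key (key-MD5 header derived by boto3)
)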
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession
request. You can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) that are specified in the CreateSession
request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket.
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession
request. It's not supported to override the encryption settings values in the CreateSession
request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to CreateMultipartUpload
:
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
Directory buckets - S3 Lifecycle is not supported by directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.
General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service (KMS) key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket.
When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key or customer-provided key to encrypt the uploaded parts.
When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload
request.
Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3
) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms
, but don't provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key (aws/s3
key) in KMS to protect the data.
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey*
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role.
All GET
and PUT
requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.
Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession
request. You can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) that are specified in the CreateSession
request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket.
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession
request. It's not supported to override the encryption settings values in the CreateSession
request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to CreateMultipartUpload
:
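A minimal end-to-end sketch of the multipart flow described above, using boto3 with placeholder bucket and key names; each call is a separately signed request, and the upload must be completed or aborted so the stored parts stop accruing charges:

import boto3

s3 = boto3.client("s3")
bucket, key = "example-bucket", "backups/archive.bin"  # placeholders

# Initiate the upload and keep the upload ID for the later calls.
mpu = s3.create_multipart_upload(Bucket=bucket, Key=key)
upload_id = mpu["UploadId"]

part = s3.upload_part(
    Bucket=bucket,
    Key=key,
    UploadId=upload_id,
    PartNumber=1,
    Body=b"x" * (5 * 1024 * 1024),  # every part except the last must be at least 5 MiB
)

# Complete (or abort) the upload so the stored parts stop being billed.
s3.complete_multipart_upload(
    Bucket=bucket,
    Key=key,
    UploadId=upload_id,
    MultipartUpload={"Parts": [{"ETag": part["ETag"], "PartNumber": 1}]},
)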
Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide.
To make Zonal endpoint API requests on a directory bucket, use the CreateSession
API operation. Specifically, you grant s3express:CreateSession
permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession
API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token
request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession
API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval.
If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
CopyObject
API operation - Unlike other Zonal endpoint API operations, the CopyObject
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the CopyObject
API operation on directory buckets, see CopyObject.
HeadBucket
API operation - Unlike other Zonal endpoint API operations, the HeadBucket
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket
API operation on directory buckets, see HeadBucket.
To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession
permission to the bucket. In a policy, you can have the s3express:SessionMode
condition key to control who can create a ReadWrite
or ReadOnly
session. For more information about ReadWrite
or ReadOnly
sessions, see x-amz-create-session-mode
. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
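As a hedged sketch of the SessionMode condition described above, the following attaches a bucket policy that only allows ReadOnly sessions; the account ID, role name, Region, directory bucket name, and ARN format are illustrative placeholders rather than the documented example policies:

import json
import boto3

s3 = boto3.client("s3", region_name="us-west-2")

# Hypothetical account, role, Region, and directory bucket name; the statement
# only illustrates the s3express:SessionMode condition key.
bucket = "example-bucket--usw2-az1--x-s3"
policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowReadOnlySessions",
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::111122223333:role/analytics-readers"},
            "Action": "s3express:CreateSession",
            "Resource": f"arn:aws:s3express:us-west-2:111122223333:bucket/{bucket}",
            "Condition": {"StringEquals": {"s3express:SessionMode": "ReadOnly"}},
        }
    ],
}
s3.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))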
To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession
permission.
If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, you authenticate and authorize requests through CreateSession for low latency. To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.
Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3
) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, you can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) from the CreateSession
request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket.
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession
request. It's not supported to override the encryption settings values in the CreateSession
request. Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), it's not supported to override the values of the encryption settings from the CreateSession
request.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide.
To make Zonal endpoint API requests on a directory bucket, use the CreateSession
API operation. Specifically, you grant s3express:CreateSession
permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession
API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token
request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession
API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval.
If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
CopyObject
API operation - Unlike other Zonal endpoint API operations, the CopyObject
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the CopyObject
API operation on directory buckets, see CopyObject.
HeadBucket
API operation - Unlike other Zonal endpoint API operations, the HeadBucket
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket
API operation on directory buckets, see HeadBucket.
To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession
permission to the bucket. In a policy, you can have the s3express:SessionMode
condition key to control who can create a ReadWrite
or ReadOnly
session. For more information about ReadWrite
or ReadOnly
sessions, see x-amz-create-session-mode
. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession
permission.
If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, you authenticate and authorize requests through CreateSession for low latency. To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.
Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3
) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, you can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) from the CreateSession
request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket.
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession
request. It's not supported to override the encryption settings values in the CreateSession
request. Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), it's not supported to override the values of the encryption settings from the CreateSession
request.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
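Although the CLI and SDKs normally create and refresh sessions for you, the operation can also be called directly; a minimal boto3 sketch with a placeholder directory bucket name:

import boto3

s3 = boto3.client("s3", region_name="us-west-2")

# Placeholder directory bucket name; SessionMode may be ReadWrite or ReadOnly.
response = s3.create_session(
    Bucket="example-bucket--usw2-az1--x-s3",
    SessionMode="ReadWrite",
)
credentials = response["Credentials"]  # AccessKeyId, SecretAccessKey, SessionToken, Expiration (about 5 minutes)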
Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - You must have the s3:DeleteBucket
permission on the specified bucket in a policy.
Directory bucket permissions - You must have the s3express:DeleteBucket
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to DeleteBucket
:
Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - You must have the s3:DeleteBucket
permission on the specified bucket in a policy.
Directory bucket permissions - You must have the s3express:DeleteBucket
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to DeleteBucket
:
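A minimal boto3 sketch of the delete flow above (placeholder bucket name); the call fails until every object, object version, and delete marker has been removed:

import boto3

s3 = boto3.client("s3")

# Placeholder bucket name; empty the bucket first, then delete it.
s3.delete_bucket(Bucket="example-general-purpose-bucket")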
Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
For more information about object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
",
+ "documentation":"Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
For more information about object expiration, see Elements to Describe Lifecycle Actions.
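A minimal boto3 sketch of removing a bucket's lifecycle configuration (placeholder bucket name), which requires the s3:PutLifecycleConfiguration permission noted above:

import boto3

s3 = boto3.client("s3")

# Placeholder bucket name; removes every lifecycle rule, after which objects
# no longer expire automatically.
s3.delete_bucket_lifecycle(Bucket="example-general-purpose-bucket")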
Related actions include:
",
"staticContextParams":{
"UseS3ExpressControlEndpoint":{"value":true}
}
@@ -265,7 +265,7 @@
},
"input":{"shape":"DeleteBucketPolicyRequest"},
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html",
- "documentation":"Deletes the policy of a specified bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the DeleteBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:DeleteBucketPolicy
permission is required in a policy. For more information about bucket policies for general purpose buckets, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:DeleteBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to DeleteBucketPolicy
Deletes the policy of a specified bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the DeleteBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:DeleteBucketPolicy
permission is required in a policy. For more information about bucket policies for general purpose buckets, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:DeleteBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to DeleteBucketPolicy
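A minimal boto3 sketch of deleting a bucket policy (placeholder bucket name); the caller must belong to the bucket owner's account and hold s3:DeleteBucketPolicy on general purpose buckets or s3express:DeleteBucketPolicy on directory buckets:

import boto3

s3 = boto3.client("s3")

# Placeholder bucket name; removes the bucket policy entirely.
s3.delete_bucket_policy(Bucket="example-bucket")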
Removes an object from a bucket. The behavior depends on the bucket's versioning state:
If bucket versioning is not enabled, the operation permanently deletes the object.
If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId
in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket.
If bucket versioning is suspended, the operation removes the object that has a null versionId
, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId
, and all versions of the object have a versionId
, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId
, you must include the object’s versionId
in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
To remove a specific version, you must use the versionId
query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker
to true.
If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa
request header in the DELETE versionId
request. Requests that include x-amz-mfa
must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request.
Directory buckets - MFA delete is not supported by directory buckets.
You can delete objects by explicitly calling DELETE Object or by configuring a bucket lifecycle rule (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject
, s3:DeleteObjectVersion
, and s3:PutLifeCycleConfiguration
actions.
Directory buckets - S3 Lifecycle is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always have the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following action is related to DeleteObject
:
Removes an object from a bucket. The behavior depends on the bucket's versioning state:
If bucket versioning is not enabled, the operation permanently deletes the object.
If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId
in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket.
If bucket versioning is suspended, the operation removes the object that has a null versionId
, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId
, and all versions of the object have a versionId
, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId
, you must include the object’s versionId
in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
To remove a specific version, you must use the versionId
query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker
to true.
If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa
request header in the DELETE versionId
request. Requests that include x-amz-mfa
must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request.
Directory buckets - MFA delete is not supported by directory buckets.
You can delete objects by explicitly calling DELETE Object or by configuring a lifecycle rule (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject
, s3:DeleteObjectVersion
, and s3:PutLifeCycleConfiguration
actions.
Directory buckets - S3 Lifecycle is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always have the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following action is related to DeleteObject
:
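As a hedged sketch of the versioning behavior described above, using boto3 (bucket, key, and version ID are placeholders):

```python
import boto3

s3 = boto3.client("s3")

# Without a VersionId, a versioning-enabled bucket gets a delete marker;
# the response reports the marker's version ID.
resp = s3.delete_object(Bucket="amzn-s3-demo-bucket", Key="photos/2006/February/sample.jpg")
print(resp.get("DeleteMarker"), resp.get("VersionId"))

# Supplying the object's versionId permanently deletes that version.
s3.delete_object(
    Bucket="amzn-s3-demo-bucket",
    Key="photos/2006/February/sample.jpg",
    VersionId="EXAMPLEVERSIONID",  # placeholder
)
```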
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode, the response includes only keys where the delete operation encountered an error. For a successful deletion in quiet mode, the operation does not return any information about the delete in the response body.
When performing this action on an MFA Delete enabled bucket that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.
Directory buckets - MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always specify the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32
, x-amz-checksum-crc32c
, x-amz-checksum-sha1
, or x-amz-checksum-sha256
) is required for all Multi-Object Delete requests.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to DeleteObjects
:
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode, the response includes only keys where the delete operation encountered an error. For a successful deletion in quiet mode, the operation does not return any information about the delete in the response body.
When performing this action on an MFA Delete enabled bucket that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.
Directory buckets - MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always specify the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32
, x-amz-checksum-crc32c
, x-amz-checksum-sha1
, or x-amz-checksum-sha256
) is required for all Multi-Object Delete requests.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to DeleteObjects
:
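A minimal boto3 sketch of a quiet-mode Multi-Object Delete (names are placeholders); the SDK adds the required request-integrity header for you.

```python
import boto3

s3 = boto3.client("s3")

# Up to 1,000 keys per request; Quiet=True returns only the keys that failed.
resp = s3.delete_objects(
    Bucket="amzn-s3-demo-bucket",
    Delete={
        "Objects": [
            {"Key": "logs/2024-01-01.gz"},
            {"Key": "logs/2024-01-02.gz"},
        ],
        "Quiet": True,
    },
)
for err in resp.get("Errors", []):
    print(err["Key"], err["Code"], err["Message"])
```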
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API, which is compatible with the new functionality. The previous version of the API supported filtering based only on an object key name prefix, which is supported for general purpose buckets for backward compatibility. For the related API description, see GetBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling multipart uploads. Expiration of versioned objects, transitions, and tag filters are not supported.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:GetLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:GetLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration
:
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API, which is compatible with the new functionality. The previous version of the API supported filtering based only on an object key name prefix, which is supported for general purpose buckets for backward compatibility. For the related API description, see GetBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling multipart uploads. Expiration of versioned objects, transitions, and tag filters are not supported.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:GetLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:GetLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration
:
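For illustration, a boto3 sketch that handles the NoSuchLifecycleConfiguration error noted above (bucket name is a placeholder):

```python
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")

try:
    resp = s3.get_bucket_lifecycle_configuration(Bucket="amzn-s3-demo-bucket")
    for rule in resp["Rules"]:
        print(rule.get("ID"), rule["Status"])
except ClientError as err:
    if err.response["Error"]["Code"] == "NoSuchLifecycleConfiguration":
        print("no lifecycle configuration set on this bucket")
    else:
        raise
```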
Returns the policy of a specified bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:GetBucketPolicy
permission is required in a policy. For more information about general purpose bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.
Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following action is related to GetBucketPolicy
:
Returns the policy of a specified bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:GetBucketPolicy
permission is required in a policy. For more information about general purpose bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.
Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following action is related to GetBucketPolicy
:
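A minimal boto3 sketch; the policy document comes back as a JSON string (bucket name is a placeholder):

```python
import json

import boto3

s3 = boto3.client("s3")

resp = s3.get_bucket_policy(Bucket="amzn-s3-demo-bucket")
policy = json.loads(resp["Policy"])  # "Policy" is a JSON-encoded string
for statement in policy.get("Statement", []):
    print(statement.get("Sid"), statement.get("Effect"))
```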
Retrieves an object from Amazon S3.
In the GetObject
request, specify the full key name for the object.
General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
, specify the object key name as /photos/2006/February/sample.jpg
. For a path-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket
, specify the object key name as /examplebucket/photos/2006/February/sample.jpg
. For more information about request types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide.
Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket--use1-az5--x-s3
, specify the object key name as /photos/2006/February/sample.jpg
. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject
, you must have READ
access to the object (or version). If you grant READ
access to the anonymous user, the GetObject
operation returns the object without using an authorization header. For more information, see Specifying permissions in a policy in the Amazon S3 User Guide.
If you include a versionId
in your request header, you must have the s3:GetObjectVersion
permission to access a specific version of an object. The s3:GetObject
permission is not required in this scenario.
If you request the current version of an object without a specific versionId
in the request header, only the s3:GetObject
permission is required. The s3:GetObjectVersion
permission is not required in this scenario.
If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Access Denied
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted using SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState
error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request
.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GetObject
requests if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject
requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
There are times when you want to override certain response header values of a GetObject
response. For example, you might override the Content-Disposition
response header value through your GetObject
request.
You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK
is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object.
The response headers that you can override for the GetObject
response are Cache-Control
, Content-Disposition
, Content-Encoding
, Content-Language
, Content-Type
, and Expires
.
To override values for a set of response headers in the GetObject
response, you can use the following query parameters in the request.
response-cache-control
response-content-disposition
response-content-encoding
response-content-language
response-content-type
response-expires
When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to GetObject
:
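As a hedged sketch, the boto3 Response* parameters correspond to the response-* query parameters listed above (bucket and key are placeholders):

```python
import boto3

s3 = boto3.client("s3")

# The overrides apply only to this signed request's response headers.
resp = s3.get_object(
    Bucket="amzn-s3-demo-bucket",
    Key="photos/2006/February/sample.jpg",
    ResponseContentType="image/jpeg",
    ResponseContentDisposition='attachment; filename="sample.jpg"',
    ResponseCacheControl="no-cache",
)
data = resp["Body"].read()
```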
Retrieves an object from Amazon S3.
In the GetObject
request, specify the full key name for the object.
General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
, specify the object key name as /photos/2006/February/sample.jpg
. For a path-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket
, specify the object key name as /examplebucket/photos/2006/February/sample.jpg
. For more information about request types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide.
Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket--use1-az5--x-s3
, specify the object key name as /photos/2006/February/sample.jpg
. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject
, you must have READ
access to the object (or version). If you grant READ
access to the anonymous user, the GetObject
operation returns the object without using an authorization header. For more information, see Specifying permissions in a policy in the Amazon S3 User Guide.
If you include a versionId
in your request header, you must have the s3:GetObjectVersion
permission to access a specific version of an object. The s3:GetObject
permission is not required in this scenario.
If you request the current version of an object without a specific versionId
in the request header, only the s3:GetObject
permission is required. The s3:GetObjectVersion
permission is not required in this scenario.
If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Access Denied
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted using SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState
error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request
.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GetObject
requests if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject
requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
There are times when you want to override certain response header values of a GetObject
response. For example, you might override the Content-Disposition
response header value through your GetObject
request.
You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK
is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object.
The response headers that you can override for the GetObject
response are Cache-Control
, Content-Disposition
, Content-Encoding
, Content-Language
, Content-Type
, and Expires
.
To override values for a set of response headers in the GetObject
response, you can use the following query parameters in the request.
response-cache-control
response-content-disposition
response-content-encoding
response-content-language
response-content-type
response-expires
When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to GetObject
:
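Because the response-header overrides require a signed request, a presigned URL is one hedged way to hand them to an unauthenticated downloader (names and expiry are placeholders):

```python
import boto3

s3 = boto3.client("s3")

url = s3.generate_presigned_url(
    "get_object",
    Params={
        "Bucket": "amzn-s3-demo-bucket",
        "Key": "photos/2006/February/sample.jpg",
        "ResponseContentDisposition": 'attachment; filename="sample.jpg"',
    },
    ExpiresIn=3600,  # seconds
)
print(url)
```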
Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
GetObjectAttributes
combines the functionality of HeadObject
and ListParts
. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes
.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - To use GetObjectAttributes
, you must have READ access to the object. The permissions that you need to use this operation depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion
and s3:GetObjectVersionAttributes
permissions for this operation. If the bucket is not versioned, you need the s3:GetObject
and s3:GetObjectAttributes
permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
(\"no such key\") error.
If you don't have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
(\"access denied\") error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a GET
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Consider the following when using request headers:
If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK
and the data requested:
If-Match
condition evaluates to true
.
If-Unmodified-Since
condition evaluates to false
.
For more information about conditional requests, see RFC 7232.
If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified
:
If-None-Match
condition evaluates to false
.
If-Modified-Since
condition evaluates to true
.
For more information about conditional requests, see RFC 7232.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following actions are related to GetObjectAttributes
:
Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
GetObjectAttributes
combines the functionality of HeadObject
and ListParts
. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes
.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - To use GetObjectAttributes
, you must have READ access to the object. The permissions that you need to use this operation depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion
and s3:GetObjectVersionAttributes
permissions for this operation. If the bucket is not versioned, you need the s3:GetObject
and s3:GetObjectAttributes
permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
(\"no such key\") error.
If you don't have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
(\"access denied\") error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a GET
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Consider the following when using request headers:
If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK
and the data requested:
If-Match
condition evaluates to true
.
If-Unmodified-Since
condition evaluates to false
.
For more information about conditional requests, see RFC 7232.
If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified
:
If-None-Match
condition evaluates to false
.
If-Modified-Since
condition evaluates to true
.
For more information about conditional requests, see RFC 7232.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following actions are related to GetObjectAttributes
:
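A minimal boto3 sketch; ObjectAttributes selects which of the HeadObject/ListParts-style fields come back (bucket and key are placeholders):

```python
import boto3

s3 = boto3.client("s3")

resp = s3.get_object_attributes(
    Bucket="amzn-s3-demo-bucket",
    Key="photos/2006/February/sample.jpg",
    ObjectAttributes=["ETag", "Checksum", "ObjectParts", "StorageClass", "ObjectSize"],
)
print(resp.get("ObjectSize"), resp.get("StorageClass"))
```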
You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not Found
code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.
General purpose buckets - Requests to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.
General purpose bucket permissions - To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateSession
permission in the Action
element of a policy. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the bucket.
For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not Found
code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.
General purpose buckets - Requests to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.
General purpose bucket permissions - To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateSession
permission in the Action
element of a policy. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the bucket.
For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
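For illustration, a boto3 sketch that reports the generic status codes described above (bucket name is a placeholder):

```python
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")

try:
    s3.head_bucket(Bucket="amzn-s3-demo-bucket")
    print("bucket exists and is accessible")
except ClientError as err:
    # Only a generic 400, 403, or 404 comes back, with no message body.
    status = err.response["ResponseMetadata"]["HTTPStatusCode"]
    print("HeadBucket failed with HTTP status", status)
```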
The HEAD
operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. It's not possible to retrieve the exact exception behind these error codes.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
General purpose bucket permissions - To use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.
If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If you enable x-amz-checksum-mode
in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a HEAD
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
Directory buckets - Delete marker is not supported for directory buckets.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, directory buckets support only the null value of the version ID. You can only specify null as the value of the versionId query parameter in the request.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
The following actions are related to HeadObject
:
The HEAD
operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. It's not possible to retrieve the exact exception behind these error codes.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
General purpose bucket permissions - To use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.
If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If you enable x-amz-checksum-mode
in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a HEAD
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
Directory buckets - Delete marker is not supported for directory buckets.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, directory buckets support only the null value of the version ID. You can only specify null as the value of the versionId query parameter in the request.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
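As an illustration, here is a short boto3 sketch (bucket and key names are placeholders) that retrieves object metadata with HeadObject; ChecksumMode is optional and, for KMS-encrypted objects, requires the KMS permissions noted above:

    import boto3
    from botocore.exceptions import ClientError

    s3 = boto3.client("s3")

    try:
        response = s3.head_object(
            Bucket="amzn-s3-demo-bucket",  # placeholder bucket name
            Key="example-object.txt",      # placeholder object key
            ChecksumMode="ENABLED",        # ask S3 to return the stored checksum
        )
        print(response["ContentLength"], response["LastModified"])
        print(response.get("ChecksumCRC32"))  # present only if a CRC32 checksum is stored
    except ClientError as error:
        # HEAD returns only a generic status code (400, 403, 404, and so on),
        # with no message body describing the exact exception.
        print(error.response["ResponseMetadata"]["HTTPStatusCode"])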
The following actions are related to HeadObject
:
Returns a list of all Amazon S3 directory buckets owned by the authenticated sender of the request. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
You must have the s3express:ListAllMyDirectoryBuckets
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The BucketRegion
response element is not part of the ListDirectoryBuckets
Response Syntax.
Returns a list of all Amazon S3 directory buckets owned by the authenticated sender of the request. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
You must have the s3express:ListAllMyDirectoryBuckets
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The BucketRegion
response element is not part of the ListDirectoryBuckets
Response Syntax.
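For example, a minimal boto3 sketch that lists directory buckets; it assumes the ContinuationToken returned in a truncated response is passed to the next request to continue the listing:

    import boto3

    s3 = boto3.client("s3")

    names = []
    kwargs = {"MaxDirectoryBuckets": 100}
    while True:
        response = s3.list_directory_buckets(**kwargs)
        names.extend(bucket["Name"] for bucket in response.get("Buckets", []))
        token = response.get("ContinuationToken")
        if not token:
            break  # no further pages
        kwargs["ContinuationToken"] = token

    print(names)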
This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload
request, but has not yet been completed or aborted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload
operation to abort all the in-progress multipart uploads.
The ListMultipartUploads
operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads
request, the response returns an IsTruncated
element with the value of true
, a NextKeyMarker
element, and a NextUploadIdMarker
element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads
requests. In these requests, include two query parameters: key-marker
and upload-id-marker
. Set the value of key-marker
to the NextKeyMarker
value from the previous response. Similarly, set the value of upload-id-marker
to the NextUploadIdMarker
value from the previous response.
Directory buckets - The upload-id-marker
element and the NextUploadIdMarker
element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker
to the NextKeyMarker
value from the previous response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - In the ListMultipartUploads
response, the multipart uploads are sorted based on two criteria:
Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.
Time-based sorting - Uploads that share the same object key are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first appears before the ones that were initiated later.
Directory bucket - In the ListMultipartUploads
response, the multipart uploads aren't sorted lexicographically based on the object keys.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to ListMultipartUploads
:
This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload
request, but has not yet been completed or aborted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload
operation to abort all the in-progress multipart uploads.
The ListMultipartUploads
operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads
request, the response returns an IsTruncated
element with the value of true
, a NextKeyMarker
element, and a NextUploadIdMarker
element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads
requests. In these requests, include two query parameters: key-marker
and upload-id-marker
. Set the value of key-marker
to the NextKeyMarker
value from the previous response. Similarly, set the value of upload-id-marker
to the NextUploadIdMarker
value from the previous response.
Directory buckets - The upload-id-marker
element and the NextUploadIdMarker
element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker
to the NextKeyMarker
value from the previous response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - In the ListMultipartUploads
response, the multipart uploads are sorted based on two criteria:
Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.
Time-based sorting - Uploads that share the same object key are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first appears before the ones that were initiated later.
Directory bucket - In the ListMultipartUploads
response, the multipart uploads aren't sorted lexicographically based on the object keys.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
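For example, a minimal boto3 sketch (the bucket name is a placeholder) that pages through in-progress multipart uploads by following NextKeyMarker and, for general purpose buckets, NextUploadIdMarker:

    import boto3

    s3 = boto3.client("s3")
    kwargs = {"Bucket": "amzn-s3-demo-bucket", "MaxUploads": 1000}  # placeholder bucket name

    while True:
        response = s3.list_multipart_uploads(**kwargs)
        for upload in response.get("Uploads", []):
            print(upload["Key"], upload["UploadId"], upload["Initiated"])
        if not response.get("IsTruncated"):
            break
        kwargs["KeyMarker"] = response["NextKeyMarker"]
        next_upload_id = response.get("NextUploadIdMarker")
        if next_upload_id:  # not returned by directory buckets
            kwargs["UploadIdMarker"] = next_upload_id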
The following operations are related to ListMultipartUploads
:
Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.
General purpose bucket - For general purpose buckets, ListObjectsV2
doesn't return prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, the ListObjectsV2 response includes the prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - For general purpose buckets, ListObjectsV2
returns objects in lexicographical order based on their key names.
Directory bucket - For directory buckets, ListObjectsV2
does not return objects in lexicographical order.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.
The following operations are related to ListObjectsV2
:
Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.
General purpose bucket - For general purpose buckets, ListObjectsV2
doesn't return prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, the ListObjectsV2 response includes the prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - For general purpose buckets, ListObjectsV2
returns objects in lexicographical order based on their key names.
Directory bucket - For directory buckets, ListObjectsV2
does not return objects in lexicographical order.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.
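For example, a minimal boto3 sketch (bucket name and prefix are placeholders) that lists objects with ListObjectsV2; the built-in paginator follows the continuation token, so listings larger than 1,000 keys are handled automatically:

    import boto3

    s3 = boto3.client("s3")

    paginator = s3.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket="amzn-s3-demo-bucket", Prefix="logs/"):
        for obj in page.get("Contents", []):
            print(obj["Key"], obj["Size"])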
The following operations are related to ListObjectsV2
:
Lists the parts that have been uploaded for a specific multipart upload.
To use this operation, you must provide the upload ID
in the request. You obtain this upload ID by sending the initiate multipart upload request through CreateMultipartUpload.
The ListParts
request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the max-parts
request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated
field with the value of true
, and a NextPartNumberMarker
element. To list remaining uploaded parts, in subsequent ListParts
requests, include the part-number-marker
query string parameter and set its value to the NextPartNumberMarker
field value from the previous response.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt
action for the ListParts
request to succeed.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to ListParts
:
Lists the parts that have been uploaded for a specific multipart upload.
To use this operation, you must provide the upload ID
in the request. You obtain this upload ID by sending the initiate multipart upload request through CreateMultipartUpload.
The ListParts
request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the max-parts
request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated
field with the value of true
, and a NextPartNumberMarker
element. To list remaining uploaded parts, in subsequent ListParts
requests, include the part-number-marker
query string parameter and set its value to the NextPartNumberMarker
field value from the previous response.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt
action for the ListParts
request to succeed.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
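For example, a minimal boto3 sketch (bucket, key, and upload ID are placeholders) that pages through the parts of a multipart upload by following NextPartNumberMarker:

    import boto3

    s3 = boto3.client("s3")
    kwargs = {
        "Bucket": "amzn-s3-demo-bucket",  # placeholder bucket name
        "Key": "large-object.bin",        # placeholder object key
        "UploadId": "EXAMPLE-UPLOAD-ID",  # returned by CreateMultipartUpload
        "MaxParts": 1000,
    }

    while True:
        response = s3.list_parts(**kwargs)
        for part in response.get("Parts", []):
            print(part["PartNumber"], part["Size"], part["ETag"])
        if not response.get("IsTruncated"):
            break
        kwargs["PartNumberMarker"] = response["NextPartNumberMarker"]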
The following operations are related to ListParts
:
This operation configures default encryption and Amazon S3 Bucket Keys for an existing bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3).
General purpose buckets
You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide.
If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests.
Directory buckets - You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
Your SSE-KMS configuration can support only one customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3
) isn't supported.
S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can't be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
When you specify a KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.
For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests.
If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.
Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).
General purpose bucket permissions - The s3:PutEncryptionConfiguration
permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to PutBucketEncryption
:
This operation configures default encryption and Amazon S3 Bucket Keys for an existing bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3).
General purpose buckets
You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide.
If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests.
Directory buckets - You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession
requests or PUT
object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
Your SSE-KMS configuration can support only one customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3
) isn't supported.
S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can't be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
When you specify a KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.
For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests.
If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.
Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).
General purpose bucket permissions - The s3:PutEncryptionConfiguration
permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
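For example, a minimal boto3 sketch (bucket name and KMS key ARN are placeholders) that sets SSE-KMS as the bucket's default encryption and enables S3 Bucket Keys:

    import boto3

    s3 = boto3.client("s3")

    s3.put_bucket_encryption(
        Bucket="amzn-s3-demo-bucket",  # placeholder bucket name
        ServerSideEncryptionConfiguration={
            "Rules": [
                {
                    "ApplyServerSideEncryptionByDefault": {
                        "SSEAlgorithm": "aws:kms",
                        # Placeholder customer managed key ARN; for directory
                        # buckets, use a key ID or key ARN (aliases aren't supported).
                        "KMSMasterKeyID": "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",
                    },
                    "BucketKeyEnabled": True,
                }
            ]
        },
    )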
The following operations are related to PutBucketEncryption
:
For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
This operation is not supported for directory buckets.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon S3 User Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the Amazon Web Services account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. An explicit denial supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
The following operations are related to PutBucketLifecycle
:
GetBucketLifecycle(Deprecated)
By default, a resource owner—in this case, a bucket owner, which is the Amazon Web Services account that created the bucket—can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon S3 User Guide:
This operation is not supported for directory buckets.
For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
This operation is not supported for directory buckets.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon S3 User Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the Amazon Web Services account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. An explicit denial supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
The following operations are related to PutBucketLifecycle
:
GetBucketLifecycle(Deprecated)
By default, a resource owner—in this case, a bucket owner, which is the Amazon Web Services account that created the bucket—can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon S3 User Guide:
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.
Bucket lifecycle configuration supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility for general purpose buckets. For the related API description, see PutBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling multipart uploads. Expiration of versioned objects, transitions, and tag filters are not supported.
A lifecycle rule consists of the following:
A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these.
A status indicating whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to PutBucketLifecycleConfiguration
:
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.
Bucket lifecycle configuration supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility for general purpose buckets. For the related API description, see PutBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling multipart uploads. Expiration of versioned objects, transitions, and tag filters are not supported.
A lifecycle rule consists of the following:
A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these.
A status indicating whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to PutBucketLifecycleConfiguration
:
Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the PutBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:PutBucketPolicy
permission is required in a policy. For more information about bucket policies for general purpose buckets, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.
Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
The following operations are related to PutBucketPolicy
:
Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the PutBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy
, PutBucketPolicy
, and DeleteBucketPolicy
API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
General purpose bucket permissions - The s3:PutBucketPolicy
permission is required in a policy. For more information about bucket policies for general purpose buckets, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutBucketPolicy
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.
Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com
.
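A minimal boto3 sketch of applying a bucket policy; the account ID and bucket name are placeholders, and the statement shown is only an example.

import json

import boto3

s3 = boto3.client("s3")

# Grant another account read access to objects in the bucket.
policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowCrossAccountRead",
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::444455556666:root"},
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::amzn-s3-demo-bucket/*",
        }
    ],
}

s3.put_bucket_policy(Bucket="amzn-s3-demo-bucket", Policy=json.dumps(policy))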
The following operations are related to PutBucketPolicy
:
This operation is not supported for directory buckets.
When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. While this change is propagating, you may encounter intermittent HTTP 404 NoSuchKey
errors for requests to objects created or updated after enabling versioning. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT
or DELETE
) on objects in the bucket.
Sets the versioning state of an existing bucket.
You can set the versioning state with one of the following values:
Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request
header and the Status
and the MfaDelete
request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
The following operations are related to PutBucketVersioning
:
This operation is not supported for directory buckets.
When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. While this change is propagating, you might encounter intermittent HTTP 404 NoSuchKey
errors for requests to objects created or updated after enabling versioning. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT
or DELETE
) on objects in the bucket.
Sets the versioning state of an existing bucket.
You can set the versioning state with one of the following values:
Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request
header and the Status
and the MfaDelete
request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
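A short boto3 sketch of setting the versioning state; the bucket name and MFA device values are placeholders.

import boto3

s3 = boto3.client("s3")

# Enable versioning; afterwards every new object receives a unique version ID.
s3.put_bucket_versioning(
    Bucket="amzn-s3-demo-bucket",  # placeholder bucket name
    VersioningConfiguration={"Status": "Enabled"},
)

# Suspending versioning later: newly written objects get the null version ID.
# s3.put_bucket_versioning(
#     Bucket="amzn-s3-demo-bucket",
#     VersioningConfiguration={"Status": "Suspended"},
# )

# Enabling MFA Delete additionally requires the MFA serial number and token
# (bucket owner only); the device ARN and token below are placeholders.
# s3.put_bucket_versioning(
#     Bucket="amzn-s3-demo-bucket",
#     MFA="arn:aws:iam::111122223333:mfa/root-account-mfa-device 123456",
#     VersioningConfiguration={"Status": "Enabled", "MFADelete": "Enabled"},
# )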
The following operations are related to PutBucketVersioning
:
Adds an object to a bucket.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject
to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.
If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:
S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide.
This functionality is not supported for directory buckets.
S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.
This functionality is not supported for directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your PutObject
request includes specific headers.
s3:PutObject
- To successfully complete the PutObject
request, you must always have the s3:PutObject
permission on a bucket to add an object to it.
s3:PutObjectAcl
- To successfully change the object's ACL of your PutObject
request, you must have the s3:PutObjectAcl
.
s3:PutObjectTagging
- To successfully set the tag-set with your PutObject
request, you must have the s3:PutObjectTagging
.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5
header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
Directory bucket - This functionality is not supported for directory buckets.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
For more information about related Amazon S3 APIs, see the following:
", + "documentation":"Adds an object to a bucket.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject
to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.
If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:
S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide.
This functionality is not supported for directory buckets.
S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.
This functionality is not supported for directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your PutObject
request includes specific headers.
s3:PutObject
- To successfully complete the PutObject
request, you must always have the s3:PutObject
permission on a bucket to add an object to it.
s3:PutObjectAcl
- To successfully change the object's ACL of your PutObject
request, you must have the s3:PutObjectAcl
.
s3:PutObjectTagging
- To successfully set the tag-set with your PutObject
request, you must have the s3:PutObjectTagging
.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5
header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
Directory bucket - This functionality is not supported for directory buckets.
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
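As a sketch, uploading an object with boto3 and supplying the Content-MD5 integrity check described above; bucket and key names are placeholders.

import base64
import hashlib

import boto3

s3 = boto3.client("s3")
body = b"hello, world"

# Send the Base64 encoded MD5 of the payload so Amazon S3 can reject the
# request if the data is corrupted in transit.
content_md5 = base64.b64encode(hashlib.md5(body).digest()).decode("ascii")

response = s3.put_object(
    Bucket="amzn-s3-demo-bucket",  # placeholder bucket name
    Key="greetings/hello.txt",     # placeholder key name
    Body=body,
    ContentMD5=content_md5,
)
print(response["ETag"])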
For more information about related Amazon S3 APIs, see the following:
", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":false @@ -1417,7 +1418,7 @@ "input":{"shape":"UploadPartRequest"}, "output":{"shape":"UploadPartOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", - "documentation":"Uploads a part in a multipart upload.
In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide .
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs.
These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon S3 uses the x-amz-content-sha256
header as a checksum instead of Content-MD5
. For more information, see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).
Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.
General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C).
Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
).
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to UploadPart
:
Uploads a part in a multipart upload.
In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide .
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs.
These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon S3 uses the x-amz-content-sha256
header as a checksum instead of Content-MD5
. For more information, see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).
Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.
General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C).
Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
).
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
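A compact boto3 sketch of the initiate, upload, and complete sequence; the bucket, key, and part contents are placeholders (every part except the last must meet the minimum part size).

import boto3

s3 = boto3.client("s3")
bucket, key = "amzn-s3-demo-bucket", "backups/archive.bin"  # placeholders

# 1) Initiate the multipart upload and keep the returned UploadId.
mpu = s3.create_multipart_upload(Bucket=bucket, Key=key)
upload_id = mpu["UploadId"]

# 2) Upload parts (part numbers 1-10,000; reusing a number overwrites that part).
parts = []
for part_number, chunk in enumerate([b"a" * 5 * 1024 * 1024, b"tail"], start=1):
    resp = s3.upload_part(
        Bucket=bucket,
        Key=key,
        UploadId=upload_id,
        PartNumber=part_number,
        Body=chunk,
    )
    parts.append({"ETag": resp["ETag"], "PartNumber": part_number})

# 3) Complete (or abort) the upload so S3 stops charging for the stored parts.
s3.complete_multipart_upload(
    Bucket=bucket,
    Key=key,
    UploadId=upload_id,
    MultipartUpload={"Parts": parts},
)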
The following operations are related to UploadPart
:
Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source
in your request. To specify a byte range, you add the request header x-amz-copy-source-range
in your request.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
You must have READ
access to the source object and WRITE
access to the destination bucket.
General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy
operation.
If the source object is in a general purpose bucket, you must have the s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the s3:PutObject
permission to write the object copy to the destination bucket.
To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key cannot be set to ReadOnly
on the copy destination.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Error Code: InvalidRequest
Description: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
The following operations are related to UploadPartCopy
:
Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source
in your request. To specify a byte range, you add the request header x-amz-copy-source-range
in your request.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name
. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Available Local Zone for directory buckets in the Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
You must have READ
access to the source object and WRITE
access to the destination bucket.
General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy
operation.
If the source object is in a general purpose bucket, you must have the s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the s3:PutObject
permission to write the object copy to the destination bucket.
To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey
actions on the key. The requester must also have permissions for the kms:GenerateDataKey
action for the CreateMultipartUpload
API. Then, the requester needs permissions for the kms:Decrypt
action on the UploadPart
and UploadPartCopy
APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key cannot be set to ReadOnly
on the copy destination.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Error Code: InvalidRequest
Description: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com
.
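A minimal boto3 sketch of copying a byte range from an existing object into a part; the bucket, key, and range values are placeholders.

import boto3

s3 = boto3.client("s3")
bucket, key = "amzn-s3-demo-bucket", "copies/archive.bin"  # placeholders

mpu = s3.create_multipart_upload(Bucket=bucket, Key=key)

# Copy the first 5 MiB of an existing object as part 1 of the new upload
# (CopySourceRange maps to the x-amz-copy-source-range header).
resp = s3.upload_part_copy(
    Bucket=bucket,
    Key=key,
    UploadId=mpu["UploadId"],
    PartNumber=1,
    CopySource={"Bucket": "amzn-s3-demo-source", "Key": "source/archive.bin"},
    CopySourceRange="bytes=0-5242879",
)
print(resp["CopyPartResult"]["ETag"])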
The following operations are related to UploadPartCopy
:
The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32 checksum
of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The Base64 encoded, 64-bit CRC-64NVME
checksum of the object. This checksum is present if the object was uploaded with the CRC-64NVME
checksum algorithm, or if the object was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME
, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 160-bit SHA-1
digest of the object. This will only be present if the digest was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 256-bit SHA-256
digest of the object. This will only be present if the digest was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.
" } }, "documentation":"Contains all the possible checksum or digest values for an object.
" @@ -2011,7 +2020,8 @@ "CRC32", "CRC32C", "SHA1", - "SHA256" + "SHA256", + "CRC64NVME" ] }, "ChecksumAlgorithmList":{ @@ -2021,12 +2031,20 @@ }, "ChecksumCRC32":{"type":"string"}, "ChecksumCRC32C":{"type":"string"}, + "ChecksumCRC64NVME":{"type":"string"}, "ChecksumMode":{ "type":"string", "enum":["ENABLED"] }, "ChecksumSHA1":{"type":"string"}, "ChecksumSHA256":{"type":"string"}, + "ChecksumType":{ + "type":"string", + "enum":[ + "COMPOSITE", + "FULL_OBJECT" + ] + }, "CloudFunction":{"type":"string"}, "CloudFunctionConfiguration":{ "type":"structure", @@ -2097,19 +2115,27 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32 checksum
of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME
checksum of the object. The CRC-64NVME
checksum is always a full object checksum. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 160-bit SHA-1
digest of the object. This will only be present if the digest was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 256-bit SHA-256
digest of the object. This will only be present if the digest was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The checksum type, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header as a data integrity check to verify that the checksum type that is received is the same checksum type that was specified during the CreateMultipartUpload
request. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32
checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C
checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME
checksum of the object. The CRC-64NVME
checksum is always a full object checksum. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1
digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256
digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header specifies the checksum type of the object, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header as a data integrity check to verify that the checksum type that is received is the same checksum that was specified. If the checksum type doesn’t match the checksum type that was specified for the object during the CreateMultipartUpload
request, it’ll result in a BadDigest
error. For more information, see Checking object integrity in the Amazon S3 User Guide.
The expected total object size of the multipart upload request. If there’s a mismatch between the specified object size value and the actual object size value, it results in an HTTP 400 InvalidRequest
error.
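Purely as a hedged illustration of the two members described above (the checksum type agreed at CreateMultipartUpload time and the expected total object size verified at completion), the following sketch is not part of this change set; the bucket and key names are placeholders and it assumes a botocore/boto3 build that already includes these new parameters.

import boto3

s3 = boto3.client("s3")
bucket, key = "amzn-s3-demo-bucket", "large-object.bin"   # placeholder names
part_body = b"x" * (5 * 1024 * 1024)                      # a single 5 MiB part

# Ask S3 to combine part-level CRC-32 checksums into a full object checksum.
mpu = s3.create_multipart_upload(
    Bucket=bucket, Key=key,
    ChecksumAlgorithm="CRC32",
    ChecksumType="FULL_OBJECT",
)

part = s3.upload_part(
    Bucket=bucket, Key=key, UploadId=mpu["UploadId"],
    PartNumber=1, Body=part_body,
    ChecksumAlgorithm="CRC32",
)

# MpuObjectSize is the expected total size; a mismatch fails with 400 InvalidRequest.
done = s3.complete_multipart_upload(
    Bucket=bucket, Key=key, UploadId=mpu["UploadId"],
    MultipartUpload={"Parts": [{
        "PartNumber": 1,
        "ETag": part["ETag"],
        "ChecksumCRC32": part["ChecksumCRC32"],
    }]},
    MpuObjectSize=len(part_body),
)
print(done.get("ChecksumType"), done.get("ChecksumCRC32"))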
The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32
checksum of the part. This checksum is present if the multipart upload request was created with the CRC-32
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the part. This checksum is present if the multipart upload request was created with the CRC-32C
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The Base64 encoded, 64-bit CRC-64NVME
checksum of the part. This checksum is present if the multipart upload request was created with the CRC-64NVME
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 160-bit SHA-1
checksum of the part. This checksum is present if the multipart upload request was created with the SHA-1
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 256-bit SHA-256
checksum of the part. This checksum is present if the multipart upload request was created with the SHA-256
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.
", + "documentation":"If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded UTF-8 string holding JSON with the encryption context key-value pairs.
", "location":"header", "locationName":"x-amz-server-side-encryption-context" }, @@ -2668,21 +2716,29 @@ "shape":"LastModified", "documentation":"Creation date of the object.
" }, + "ChecksumType":{ + "shape":"ChecksumType", + "documentation":"The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32
checksum of the object. This checksum is only present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
The Base64 encoded, 64-bit CRC-64NVME
checksum of the object. This checksum is present if the object being copied was uploaded with the CRC-64NVME
checksum algorithm, or if the object was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME
, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 160-bit SHA-1
digest of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 256-bit SHA-256
digest of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
Container for all response elements.
" @@ -2700,19 +2756,23 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32
checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C
checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.
The Base64 encoded, 64-bit CRC-64NVME
checksum of the part. This checksum is present if the multipart upload request was created with the CRC-64NVME
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1
checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256
checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.
Container for all response elements.
" @@ -2738,11 +2798,11 @@ "members":{ "LocationConstraint":{ "shape":"BucketLocationConstraint", - "documentation":"Specifies the Region where the bucket will be created. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region.
If you don't specify a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1) by default.
For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.
This functionality is not supported for directory buckets.
Specifies the Region where the bucket will be created. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket in the Amazon S3 User Guide.
If you don't specify a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1) by default.
This functionality is not supported for directory buckets.
Specifies the location where the bucket will be created.
Directory buckets - The location type is Availability Zone or Local Zone. To use the Local Zone location type, your account must be enabled for Dedicated Local Zones. Otherwise, you get an HTTP 403 Forbidden
error with the error code AccessDenied
. To learn more, see Enable accounts for Dedicated Local Zones in the Amazon S3 User Guide.
This functionality is only supported by directory buckets.
Specifies the location where the bucket will be created.
Directory buckets - The location type is Availability Zone or Local Zone. When the location type is Local Zone, your Local Zone must be in opt-in status. Otherwise, you get an HTTP 400 Bad Request
error with the error code Access denied
. To learn more about opt-in Local Zones, see Opt-in Dedicated Local Zones in the Amazon S3 User Guide.
This functionality is only supported by directory buckets.
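As a small, hedged illustration of the LocationConstraint behavior described above (not part of this change set; the bucket name and Region are placeholders):

import boto3

s3 = boto3.client("s3", region_name="eu-west-1")

# Explicitly pin the bucket to the client's Region; omitting
# CreateBucketConfiguration creates the bucket in us-east-1 by default.
s3.create_bucket(
    Bucket="amzn-s3-demo-bucket",
    CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
)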
If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
", + "documentation":"If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
", "location":"header", "locationName":"x-amz-server-side-encryption-context" }, @@ -2944,6 +3004,12 @@ "documentation":"The algorithm that was used to create a checksum of the object.
", "location":"header", "locationName":"x-amz-checksum-algorithm" + }, + "ChecksumType":{ + "shape":"ChecksumType", + "documentation":"Indicates the checksum type that you want Amazon S3 to use to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "location":"header", + "locationName":"x-amz-checksum-type" } } }, @@ -3084,7 +3150,7 @@ }, "SSEKMSEncryptionContext":{ "shape":"SSEKMSEncryptionContext", - "documentation":"Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
", + "documentation":"Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
", "location":"header", "locationName":"x-amz-server-side-encryption-context" }, @@ -3134,6 +3200,12 @@ "documentation":"Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-algorithm" + }, + "ChecksumType":{ + "shape":"ChecksumType", + "documentation":"Indicates the checksum type that you want Amazon S3 to use to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "location":"header", + "locationName":"x-amz-checksum-type" } } }, @@ -3155,7 +3227,7 @@ }, "SSEKMSEncryptionContext":{ "shape":"SSEKMSEncryptionContext", - "documentation":"If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject
operations on this object.
If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject
operations on this object.
Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject
operations on this object.
General purpose buckets - This value must be explicitly added during CopyObject
operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.
Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
", + "documentation":"Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject
operations on this object.
General purpose buckets - This value must be explicitly added during CopyObject
operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.
Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
", "location":"header", "locationName":"x-amz-server-side-encryption-context" }, @@ -3816,7 +3888,7 @@ }, "ChecksumAlgorithm":{ "shape":"ChecksumAlgorithm", - "documentation":"Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC32
CRC32C
SHA1
SHA256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 ignores any provided ChecksumAlgorithm
parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm
.
If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
parameter.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC-32
CRC-32C
CRC-64NVME
SHA-1
SHA-256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 fails the request with a BadDigest
error.
If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
parameter.
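A rough sketch of the interaction described above between an explicit checksum value and the SDK-level ChecksumAlgorithm parameter; this is not part of the diff, the bucket and key names are placeholders, and the failure mode on a mismatch (BadDigest) is as documented above.

import base64
import zlib

import boto3

s3 = boto3.client("s3")
body = b"hello world"

# Base64 encoded, 32-bit CRC-32 of the payload (the x-amz-checksum-crc32 value).
crc32_b64 = base64.b64encode(zlib.crc32(body).to_bytes(4, "big")).decode("ascii")

s3.put_object(
    Bucket="amzn-s3-demo-bucket",
    Key="greeting.txt",
    Body=body,
    ChecksumAlgorithm="CRC32",   # x-amz-sdk-checksum-algorithm
    ChecksumCRC32=crc32_b64,     # explicit checksum; must agree with the algorithm
)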
Indicates which default minimum object size behavior is applied to the lifecycle configuration.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
Indicates which default minimum object size behavior is applied to the lifecycle configuration.
This parameter applies to general purpose buckets only. It isn't supported for directory bucket lifecycle configurations.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
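The following is an illustrative, non-normative example of setting the default minimum object size behavior described above on a general purpose bucket; the bucket name and rule are placeholders.

import boto3

s3 = boto3.client("s3")
s3.put_bucket_lifecycle_configuration(
    Bucket="amzn-s3-demo-bucket",
    TransitionDefaultMinimumObjectSize="varies_by_storage_class",
    LifecycleConfiguration={
        "Rules": [{
            "ID": "archive-logs",
            "Status": "Enabled",
            "Filter": {"Prefix": "logs/"},
            "Transitions": [{"Days": 30, "StorageClass": "GLACIER"}],
        }]
    },
)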
The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 32-bit CRC-32
checksum of the object. This checksum is only present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
The Base64 encoded, 64-bit CRC-64NVME
checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 160-bit SHA-1
digest of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 256-bit SHA-256
digest of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
The checksum type, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header response to verify that the checksum type that is received is the same checksum type that was specified in the CreateMultipartUpload
request. For more information, see Checking object integrity in the Amazon S3 User Guide.
This is set to the number of metadata entries not returned in the headers that are prefixed with x-amz-meta-
. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.
This functionality is not supported for directory buckets.
The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 32-bit CRC-32 checksum
of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The Base64 encoded, 64-bit CRC-64NVME
checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 160-bit SHA-1
digest of the object. This will only be present if the checksum was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 256-bit SHA-256
digest of the object. This will only be present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The checksum type, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header response to verify that the checksum type that is received is the same checksum type that was specified in the CreateMultipartUpload
request. For more information, see Checking object integrity in the Amazon S3 User Guide.
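To make the response members above concrete, here is a hedged sketch (not part of the diff) of reading them back from HeadObject; checksum fields are only returned when checksum mode is enabled and the object actually carries a checksum, and the bucket and key names below are placeholders.

import boto3

s3 = boto3.client("s3")
resp = s3.head_object(
    Bucket="amzn-s3-demo-bucket",
    Key="large-object.bin",
    ChecksumMode="ENABLED",
)
print(resp.get("ChecksumType"))        # e.g. FULL_OBJECT or COMPOSITE
print(resp.get("ChecksumCRC64NVME"))   # Base64 encoded value, if present
print(resp.get("ChecksumCRC32"))       # only the algorithm actually used is populated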
An entity tag (ETag) is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.
", @@ -7533,6 +7629,10 @@ "ChecksumAlgorithm":{ "shape":"ChecksumAlgorithm", "documentation":"The algorithm that was used to create a checksum of the object.
" + }, + "ChecksumType":{ + "shape":"ChecksumType", + "documentation":"The checksum type, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header response to verify that the checksum type that is received is the same checksum type that was specified in CreateMultipartUpload
request. For more information, see Checking object integrity in the Amazon S3 User Guide.
The name of the location where the bucket will be created.
For directory buckets, the name of the location is the Zone ID of the Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An example AZ ID value is usw2-az1
.
Specifies the location where the bucket will be created.
For directory buckets, the location type is Availability Zone or Local Zone. For more information about directory buckets, see Working with directory buckets in the Amazon S3 User Guide.
This functionality is only supported by directory buckets.
Specifies the location where the bucket will be created.
For directory buckets, the location type is Availability Zone or Local Zone. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide.
This functionality is only supported by directory buckets.
The algorithm that was used to create a checksum of the object.
" + }, + "ChecksumType":{ + "shape":"ChecksumType", + "documentation":"The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.
" } }, "documentation":"Container for the MultipartUpload
for the Amazon S3 object.
The algorithm that was used to create a checksum of the object.
" }, + "ChecksumType":{ + "shape":"ChecksumType", + "documentation":"The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + }, "Size":{ "shape":"Size", "documentation":"Size in bytes of the object
" @@ -8225,19 +8334,23 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32
checksum of the part. This checksum is present if the multipart upload request was created with the CRC-32
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the part. This checksum is present if the multipart upload request was created with the CRC-32C
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The Base64 encoded, 64-bit CRC-64NVME
checksum of the part. This checksum is present if the multipart upload request was created with the CRC-64NVME
checksum algorithm, or if the object was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME
, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 160-bit SHA-1
checksum of the part. This checksum is present if the multipart upload request was created with the SHA-1
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 256-bit SHA-256
checksum of the part. This checksum is present if the multipart upload request was created with the SHA-256
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
A container for elements related to an individual part.
" @@ -8281,6 +8394,10 @@ "shape":"ChecksumAlgorithmList", "documentation":"The algorithm that was used to create a checksum of the object.
" }, + "ChecksumType":{ + "shape":"ChecksumType", + "documentation":"The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + }, "Size":{ "shape":"Size", "documentation":"Size in bytes of the object.
" @@ -8428,19 +8545,23 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32
checksum of the part. This checksum is present if the object was uploaded with the CRC-32
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the part. This checksum is present if the object was uploaded with the CRC-32C
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
The Base64 encoded, 64-bit CRC-64NVME
checksum of the part. This checksum is present if the multipart upload request was created with the CRC-64NVME
checksum algorithm, or if the object was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME
, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 160-bit SHA-1
checksum of the part. This checksum is present if the object was uploaded with the SHA-1
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The Base64 encoded, 256-bit SHA-256
checksum of the part. This checksum is present if the object was uploaded with the SHA-256
checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
Container for elements related to a part.
" @@ -8636,7 +8757,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", + "documentation":"The Base64 encoded 128-bit MD5
digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "location":"header", "locationName":"Content-MD5" }, @@ -8743,7 +8864,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", + "documentation":"The Base64 encoded 128-bit MD5
digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "location":"header", "locationName":"Content-MD5" }, @@ -8778,7 +8899,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
This functionality is not supported for directory buckets.
The Base64 encoded 128-bit MD5
digest of the server-side encryption configuration.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
This functionality is not supported for directory buckets.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC32
CRC32C
SHA1
SHA256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 ignores any provided ChecksumAlgorithm
parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm
.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC-32
CRC-32C
CRC-64NVME
SHA-1
SHA-256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 fails the request with a BadDigest
error.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", + "documentation":"The Base64 encoded 128-bit MD5
digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "location":"header", "locationName":"Content-MD5" }, @@ -9249,7 +9370,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", + "documentation":"The Base64 encoded 128-bit MD5
digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "location":"header", "locationName":"Content-MD5" }, @@ -9290,7 +9411,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", + "documentation":"The Base64 encoded 128-bit MD5
digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "location":"header", "locationName":"Content-MD5" }, @@ -9331,7 +9452,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":">The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", + "documentation":">The Base64 encoded 128-bit MD5
digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "location":"header", "locationName":"Content-MD5" }, @@ -9378,7 +9499,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", + "documentation":"The Base64 encoded 128-bit MD5
digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "location":"header", "locationName":"Content-MD5" }, @@ -9441,7 +9562,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.>
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", + "documentation":"The Base64 encoded 128-bit MD5
digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.>
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", "location":"header", "locationName":"Content-MD5" }, @@ -9652,28 +9773,40 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 32-bit CRC-32 checksum
of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The Base64 encoded, 64-bit CRC-64NVME
checksum of the object. This header is present if the object was uploaded with the CRC-64NVME
checksum algorithm, or if it was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME
, to the uploaded object). For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 160-bit SHA-1
digest of the object. This will only be present if the checksum was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 256-bit SHA-256
digest of the object. This will only be present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
This header specifies the checksum type of the object, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. For PutObject
uploads, the checksum type is always FULL_OBJECT
. You can use this header as a data integrity check to verify that the checksum type that is received is the same checksum that was specified. For more information, see Checking object integrity in the Amazon S3 User Guide.
The server-side encryption algorithm used when you store this object in Amazon S3.
", @@ -9706,7 +9839,7 @@ }, "SSEKMSEncryptionContext":{ "shape":"SSEKMSEncryptionContext", - "documentation":"If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject
operations on this object.
If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject
operations on this object.
The size of the object in bytes. This will only be present if you append to an object.
This functionality is only supported for objects in the Amazon S3 Express One Zone storage class in directory buckets.
The size of the object in bytes. This value is only be present if you append to an object.
This functionality is only supported for objects in the Amazon S3 Express One Zone storage class in directory buckets.
The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.
The Content-MD5
or x-amz-sdk-checksum-algorithm
header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
This functionality is not supported for directory buckets.
The Base64 encoded 128-bit MD5
digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.
The Content-MD5
or x-amz-sdk-checksum-algorithm
header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
This functionality is not supported for directory buckets.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC32
CRC32C
SHA1
SHA256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 ignores any provided ChecksumAlgorithm
parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm
.
The Content-MD5
or x-amz-sdk-checksum-algorithm
header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC-32
CRC-32C
CRC-64NVME
SHA-1
SHA-256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 fails the request with a BadDigest
error.
The Content-MD5
or x-amz-sdk-checksum-algorithm
header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
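A minimal sketch of pinning the request checksum algorithm from the SDK side (bucket and key names are placeholders; this illustrates the header behavior described above rather than code from this repository):

    import boto3

    s3 = boto3.client("s3")

    # Sends x-amz-sdk-checksum-algorithm: CRC32 and the matching
    # x-amz-checksum-crc32 value for this upload.
    resp = s3.put_object(
        Bucket="amzn-s3-demo-bucket",
        Key="example-object",
        Body=b"hello world",
        ChecksumAlgorithm="CRC32",
    )
    print(resp.get("ChecksumCRC32"))  # Base64 encoded 32-bit CRC-32 of the object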
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32
checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C
checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME
checksum of the object. The CRC-64NVME
checksum is always a full object checksum. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1
digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256
digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject
operations on this object.
General purpose buckets - This value must be explicitly added during CopyObject
operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.
Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
", + "documentation":"Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject
operations on this object.
General purpose buckets - This value must be explicitly added during CopyObject
operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.
Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
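A hedged sketch of supplying the encryption context as a Base64 encoded JSON document (the bucket, key, KMS key alias, and context values are placeholders):

    import base64
    import json

    import boto3

    s3 = boto3.client("s3")

    context = {"purpose": "example"}  # hypothetical encryption context pair
    s3.put_object(
        Bucket="amzn-s3-demo-bucket",
        Key="kms-protected-object",
        Body=b"secret payload",
        ServerSideEncryption="aws:kms",
        SSEKMSKeyId="alias/example-key",
        SSEKMSEncryptionContext=base64.b64encode(
            json.dumps(context).encode("utf-8")
        ).decode("ascii"),
    )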
", "location":"header", "locationName":"x-amz-server-side-encryption-context" }, @@ -11622,25 +11761,31 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 32-bit CRC-32 checksum
of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 32-bit CRC-32C
checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME
checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 160-bit SHA-1
digest of the object. This will only be present if the checksum was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The Base64 encoded, 256-bit SHA-256
digest of the object. This will only be present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object lock parameters are specified.
This functionality is not supported for directory buckets.
The Base64 encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object lock parameters are specified.
This functionality is not supported for directory buckets.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32
checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C
checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME
checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1
digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256
digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 32-bit CRC-32
checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", "location":"header", "locationName":"x-amz-fwd-header-x-amz-checksum-crc32" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 32-bit CRC-32C
checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", "location":"header", "locationName":"x-amz-fwd-header-x-amz-checksum-crc32c" }, + "ChecksumCRC64NVME":{ + "shape":"ChecksumCRC64NVME", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME
checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 160-bit SHA-1
digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", "location":"header", "locationName":"x-amz-fwd-header-x-amz-checksum-sha1" }, "ChecksumSHA256":{ "shape":"ChecksumSHA256", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 256-bit SHA-256 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 256-bit SHA-256
digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
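A sketch of an S3 Object Lambda handler that forwards exactly one checksum header with the transformed object (the transformation and the event field names follow the usual Object Lambda event shape and are assumptions of this example):

    import base64
    import hashlib

    import boto3

    s3 = boto3.client("s3")

    def handler(event, context):
        ctx = event["getObjectContext"]
        body = b"transformed object bytes"  # placeholder transformation result
        sha256 = base64.b64encode(hashlib.sha256(body).digest()).decode("ascii")

        # Only one checksum parameter may be supplied per response.
        s3.write_get_object_response(
            RequestRoute=ctx["outputRoute"],
            RequestToken=ctx["outputToken"],
            Body=body,
            ChecksumSHA256=sha256,
        )
        return {"statusCode": 200}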
", "location":"header", "locationName":"x-amz-fwd-header-x-amz-checksum-sha256" }, diff --git a/botocore/data/s3control/2018-08-20/service-2.json b/botocore/data/s3control/2018-08-20/service-2.json index f7f4126612..a234cfc299 100644 --- a/botocore/data/s3control/2018-08-20/service-2.json +++ b/botocore/data/s3control/2018-08-20/service-2.json @@ -3380,7 +3380,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" }, "GeneratedManifestEncryption":{ "type":"structure", @@ -5102,7 +5102,7 @@ }, "ExpiredObjectDeleteMarker":{ "shape":"ExpiredObjectDeleteMarker", - "documentation":"Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired. If set to false, the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.
" + "documentation":"Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired. If set to false, the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy. To learn more about delete markers, see Working with delete markers.
" } }, "documentation":"The container of the Outposts bucket lifecycle expiration.
" @@ -7408,7 +7408,7 @@ "members":{ "TargetResource":{ "shape":"S3RegionalOrS3ExpressBucketArnString", - "documentation":"Specifies the destination bucket Amazon Resource Name (ARN) for the batch copy operation.
General purpose buckets - For example, to copy objects to a general purpose bucket named destinationBucket
, set the TargetResource
property to arn:aws:s3:::destinationBucket
.
Directory buckets - For example, to copy objects to a directory bucket named destinationBucket
in the Availability Zone; identified by the AZ ID usw2-az1
, set the TargetResource
property to arn:aws:s3express:region:account_id:/bucket/destination_bucket_base_name--usw2-az1--x-s3
.
Specifies the destination bucket Amazon Resource Name (ARN) for the batch copy operation.
General purpose buckets - For example, to copy objects to a general purpose bucket named destinationBucket
, set the TargetResource
property to arn:aws:s3:::destinationBucket
.
Directory buckets - For example, to copy objects to a directory bucket named destinationBucket
in the Availability Zone identified by the AZ ID usw2-az1
, set the TargetResource
property to arn:aws:s3express:region:account_id:/bucket/destination_bucket_base_name--usw2-az1--x-s3
. A directory bucket as a destination bucket can be in an Availability Zone or a Local Zone.
Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, you get an HTTP 400 Bad Request
error with the error code InvalidRequest
.
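An abridged, hedged sketch of the Operation block for a Batch Operations copy into a directory bucket (region, account ID, and bucket base name are placeholders; a full CreateJob request also needs a manifest, report, priority, and role):

    # Only the Operation portion of the CreateJob request is shown here.
    operation = {
        "S3PutObjectCopy": {
            # Directory bucket ARNs use the base-name--zone-id--x-s3 convention.
            "TargetResource": (
                "arn:aws:s3express:us-west-2:111122223333:"
                "/bucket/destination-bucket-base-name--usw2-az1--x-s3"
            )
        }
    }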
Creates a namespace. A namespace is a logical grouping of tables within your table bucket, which you can use to organize tables. For more information, see Table namespaces.
" + "documentation":"Creates a namespace. A namespace is a logical grouping of tables within your table bucket, which you can use to organize tables. For more information, see Create a namespace in the Amazon Simple Storage Service User Guide.
You must have the s3tables:CreateNamespace
permission to use this operation.
Creates a new table associated with the given namespace in a table bucket.
" + "documentation":"Creates a new table associated with the given namespace in a table bucket. For more information, see Creating an Amazon S3 table in the Amazon Simple Storage Service User Guide.
You must have the s3tables:CreateTable
permission to use this operation.
Additionally, you must have the s3tables:PutTableData
permission to use this operation with the optional metadata
request parameter.
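A minimal sketch (bucket name, namespace, table name, and schema are placeholders) of creating a table bucket, a namespace, and an Iceberg table that passes the optional metadata request parameter described above:

    import boto3

    s3tables = boto3.client("s3tables")

    bucket = s3tables.create_table_bucket(name="example-table-bucket")
    arn = bucket["arn"]

    s3tables.create_namespace(tableBucketARN=arn, namespace=["analytics"])

    s3tables.create_table(
        tableBucketARN=arn,
        namespace="analytics",
        name="daily_sales",
        format="ICEBERG",
        metadata={
            "iceberg": {
                "schema": {
                    "fields": [
                        {"name": "sale_date", "type": "date", "required": True},
                        {"name": "amount", "type": "double"},
                    ]
                }
            }
        },
    )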
Creates a table bucket.
" + "documentation":"Creates a table bucket. For more information, see Creating a table bucket in the Amazon Simple Storage Service User Guide.
You must have the s3tables:CreateTableBucket
permission to use this operation.
Deletes a namespace.
", + "documentation":"Deletes a namespace. For more information, see Delete a namespace in the Amazon Simple Storage Service User Guide.
You must have the s3tables:DeleteNamespace
permission to use this operation.
Deletes a table.
", + "documentation":"Deletes a table. For more information, see Deleting an Amazon S3 table in the Amazon Simple Storage Service User Guide.
You must have the s3tables:DeleteTable
permission to use this operation.
Deletes a table bucket.
", + "documentation":"Deletes a table bucket. For more information, see Deleting a table bucket in the Amazon Simple Storage Service User Guide.
You must have the s3tables:DeleteTableBucket
permission to use this operation.
Deletes a table bucket policy.
", + "documentation":"Deletes a table bucket policy. For more information, see Deleting a table bucket policy in the Amazon Simple Storage Service User Guide.
You must have the s3tables:DeleteTableBucketPolicy
permission to use this operation.
Deletes a table policy.
", + "documentation":"Deletes a table policy. For more information, see Deleting a table policy in the Amazon Simple Storage Service User Guide.
You must have the s3tables:DeleteTablePolicy
permission to use this operation.
Gets details about a namespace.
" + "documentation":"Gets details about a namespace. For more information, see Table namespaces in the Amazon Simple Storage Service User Guide.
You must have the s3tables:GetNamespace
permission to use this operation.
Gets details about a table.
" + "documentation":"Gets details about a table. For more information, see S3 Tables in the Amazon Simple Storage Service User Guide.
You must have the s3tables:GetTable
permission to use this operation.
Gets details on a table bucket.
" + "documentation":"Gets details on a table bucket. For more information, see Viewing details about an Amazon S3 table bucket in the Amazon Simple Storage Service User Guide.
You must have the s3tables:GetTableBucket
permission to use this operation.
Gets details about a maintenance configuration for a given table bucket.
" + "documentation":"Gets details about a maintenance configuration for a given table bucket. For more information, see Amazon S3 table bucket maintenance in the Amazon Simple Storage Service User Guide.
You must have the s3tables:GetTableBucketMaintenanceConfiguration
permission to use this operation.
Gets details about a table bucket policy.
" + "documentation":"Gets details about a table bucket policy. For more information, see Viewing a table bucket policy in the Amazon Simple Storage Service User Guide.
You must have the s3tables:GetTableBucketPolicy
permission to use this operation.
Gets details about the maintenance configuration of a table.
" + "documentation":"Gets details about the maintenance configuration of a table. For more information, see S3 Tables maintenance in the Amazon Simple Storage Service User Guide.
You must have the s3tables:GetTableMaintenanceConfiguration
permission to use this operation.
Gets the status of a maintenance job for a table.
" + "documentation":"Gets the status of a maintenance job for a table. For more information, see S3 Tables maintenance in the Amazon Simple Storage Service User Guide.
You must have the s3tables:GetTableMaintenanceJobStatus
permission to use this operation.
Gets the location of the table metadata.
" + "documentation":"Gets the location of the table metadata.
You must have the s3tables:GetTableMetadataLocation
permission to use this operation.
Gets details about a table policy.
" + "documentation":"Gets details about a table policy. For more information, see Viewing a table policy in the Amazon Simple Storage Service User Guide.
You must have the s3tables:GetTablePolicy
permission to use this operation.
Lists the namespaces within a table bucket.
" + "documentation":"Lists the namespaces within a table bucket. For more information, see Table namespaces in the Amazon Simple Storage Service User Guide.
You must have the s3tables:ListNamespaces
permission to use this operation.
Lists table buckets for your account.
" + "documentation":"Lists table buckets for your account. For more information, see S3 Table buckets in the Amazon Simple Storage Service User Guide.
You must have the s3tables:ListTableBuckets
permission to use this operation.
List tables in the given table bucket.
" + "documentation":"List tables in the given table bucket. For more information, see S3 Tables in the Amazon Simple Storage Service User Guide.
You must have the s3tables:ListTables
permission to use this operation.
Creates a new maintenance configuration or replaces an existing maintenance configuration for a table bucket.
" + "documentation":"Creates a new maintenance configuration or replaces an existing maintenance configuration for a table bucket. For more information, see Amazon S3 table bucket maintenance in the Amazon Simple Storage Service User Guide.
You must have the s3tables:PutTableBucketMaintenanceConfiguration
permission to use this operation.
Creates a new maintenance configuration or replaces an existing table bucket policy for a table bucket.
", + "documentation":"Creates a new maintenance configuration or replaces an existing table bucket policy for a table bucket. For more information, see Adding a table bucket policy in the Amazon Simple Storage Service User Guide.
You must have the s3tables:PutTableBucketPolicy
permission to use this operation.
Creates a new maintenance configuration or replaces an existing maintenance configuration for a table.
" + "documentation":"Creates a new maintenance configuration or replaces an existing maintenance configuration for a table. For more information, see S3 Tables maintenance in the Amazon Simple Storage Service User Guide.
You must have the s3tables:PutTableMaintenanceConfiguration
permission to use this operation.
Creates a new maintenance configuration or replaces an existing table policy for a table.
", + "documentation":"Creates a new maintenance configuration or replaces an existing table policy for a table. For more information, see Adding a table policy in the Amazon Simple Storage Service User Guide.
You must have the s3tables:PutTablePolicy
permission to use this operation.
Renames a table or a namespace.
" + "documentation":"Renames a table or a namespace. For more information, see S3 Tables in the Amazon Simple Storage Service User Guide.
You must have the s3tables:RenameTable
permission to use this operation.
Updates the metadata location for a table.
" + "documentation":"Updates the metadata location for a table. The metadata location of a table must be an S3 URI that begins with the table's warehouse location. The metadata location for an Apache Iceberg table must end with .metadata.json
, or if the metadata file is Gzip-compressed, .metadata.json.gz
.
You must have the s3tables:UpdateTableMetadataLocation
permission to use this operation.
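A rough sketch (ARN, namespace, table name, and metadata file name are placeholders) of committing a new Iceberg metadata location with the version token returned by GetTable:

    import boto3

    s3tables = boto3.client("s3tables")
    table_bucket_arn = "arn:aws:s3tables:us-east-1:111122223333:bucket/example-table-bucket"

    table = s3tables.get_table(
        tableBucketARN=table_bucket_arn, namespace="analytics", name="daily_sales"
    )

    s3tables.update_table_metadata_location(
        tableBucketARN=table_bucket_arn,
        namespace="analytics",
        name="daily_sales",
        versionToken=table["versionToken"],
        # Must live under the table's warehouse location and end with
        # .metadata.json (or .metadata.json.gz when Gzip-compressed).
        metadataLocation=table["warehouseLocation"] + "/metadata/00001-rewrite.metadata.json",
    )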
The format for the table.
" + }, + "metadata":{ + "shape":"TableMetadata", + "documentation":"The metadata for the table.
" } } }, @@ -690,7 +698,7 @@ "members":{ "tableBucketARN":{ "shape":"TableBucketARN", - "documentation":"The Amazon Resource Number (ARN) of the table bucket.
", + "documentation":"The Amazon Resource Name (ARN) of the table bucket.
", "location":"uri", "locationName":"tableBucketARN" } @@ -718,7 +726,7 @@ "members":{ "tableBucketARN":{ "shape":"TableBucketARN", - "documentation":"The Amazon Resource Number (ARN) of the table bucket that contains the table.
", + "documentation":"The Amazon Resource Name (ARN) of the table bucket that contains the table.
", "location":"uri", "locationName":"tableBucketARN" }, @@ -866,7 +874,7 @@ "members":{ "tableBucketARN":{ "shape":"TableBucketARN", - "documentation":"The Amazon Resource Number (ARN) of the table bucket.
", + "documentation":"The Amazon Resource Name (ARN) of the table bucket.
", "location":"uri", "locationName":"tableBucketARN" } @@ -878,7 +886,7 @@ "members":{ "resourcePolicy":{ "shape":"ResourcePolicy", - "documentation":"The name of the resource policy.
" + "documentation":"The JSON
that defines the policy.
The Amazon Resource Number (ARN) of the table bucket that contains the table.
", + "documentation":"The Amazon Resource Name (ARN) of the table bucket that contains the table.
", "location":"uri", "locationName":"tableBucketARN" }, @@ -1094,7 +1102,7 @@ "members":{ "resourcePolicy":{ "shape":"ResourcePolicy", - "documentation":"The name of the resource policy.
" + "documentation":"The JSON
that defines the policy.
Contains details about the compaction settings for an Iceberg table.
" }, + "IcebergMetadata":{ + "type":"structure", + "required":["schema"], + "members":{ + "schema":{ + "shape":"IcebergSchema", + "documentation":"The schema for an Iceberg table.
" + } + }, + "documentation":"Contains details about the metadata for an Iceberg table.
" + }, + "IcebergSchema":{ + "type":"structure", + "required":["fields"], + "members":{ + "fields":{ + "shape":"SchemaFieldList", + "documentation":"The schema fields for the table
" + } + }, + "documentation":"Contains details about the schema for an Iceberg table.
" + }, "IcebergSnapshotManagementSettings":{ "type":"structure", "members":{ @@ -1373,7 +1403,7 @@ "members":{ "tableBucketARN":{ "shape":"TableBucketARN", - "documentation":"The Amazon resource Number (ARN) of the table bucket.
", + "documentation":"The Amazon resource Name (ARN) of the table bucket.
", "location":"uri", "locationName":"tableBucketARN" }, @@ -1538,13 +1568,13 @@ "members":{ "tableBucketARN":{ "shape":"TableBucketARN", - "documentation":"The Amazon Resource Number (ARN) of the table bucket.
", + "documentation":"The Amazon Resource Name (ARN) of the table bucket.
", "location":"uri", "locationName":"tableBucketARN" }, "resourcePolicy":{ "shape":"ResourcePolicy", - "documentation":"The name of the resource policy.
" + "documentation":"The JSON
that defines the policy.
The Amazon Resource Number (ARN) of the table bucket that contains the table.
", + "documentation":"The Amazon Resource Name (ARN) of the table bucket that contains the table.
", "location":"uri", "locationName":"tableBucketARN" }, @@ -1617,7 +1647,7 @@ }, "resourcePolicy":{ "shape":"ResourcePolicy", - "documentation":"The name of the resource policy.
" + "documentation":"The JSON
that defines the policy.
The name of the field.
" + }, + "type":{ + "shape":"String", + "documentation":"The field type. S3 Tables supports all Apache Iceberg primitive types. For more information, see the Apache Iceberg documentation.
" + }, + "required":{ + "shape":"Boolean", + "documentation":"A Boolean value that specifies whether values are required for each row in this field. By default, this is false
and null values are allowed in the field. If this is true
the field does not allow null values.
Contains details about a schema field.
" + }, + "SchemaFieldList":{ + "type":"list", + "member":{"shape":"SchemaField"} + }, "String":{"type":"string"}, "SyntheticTimestamp_date_time":{ "type":"timestamp", @@ -1732,7 +1788,7 @@ "members":{ "arn":{ "shape":"TableBucketARN", - "documentation":"The Amazon Resource Number (ARN) of the table bucket.
" + "documentation":"The Amazon Resource Name (ARN) of the table bucket.
" }, "name":{ "shape":"TableBucketName", @@ -1826,6 +1882,17 @@ "icebergSnapshotManagement" ] }, + "TableMetadata":{ + "type":"structure", + "members":{ + "iceberg":{ + "shape":"IcebergMetadata", + "documentation":"Contains details about the metadata of an Iceberg table.
" + } + }, + "documentation":"Contains details about the table metadata.
", + "union":true + }, "TableName":{ "type":"string", "max":255, @@ -1857,7 +1924,7 @@ }, "tableARN":{ "shape":"TableARN", - "documentation":"The Amazon Resource Number (ARN) of the table.
" + "documentation":"The Amazon Resource Name (ARN) of the table.
" }, "createdAt":{ "shape":"SyntheticTimestamp_date_time", @@ -1947,7 +2014,7 @@ }, "tableARN":{ "shape":"TableARN", - "documentation":"The Amazon Resource Number (ARN) of the table.
" + "documentation":"The Amazon Resource Name (ARN) of the table.
" }, "namespace":{ "shape":"NamespaceList", diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 622ea7e6b3..957900e498 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -29903,7 +29903,7 @@ }, "Image":{ "shape":"ContainerImage", - "documentation":"The Amazon EC2 Container Registry path where inference code is stored.
If you are using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both registry/repository[:tag]
and registry/repository[@digest]
image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker.
The Amazon Elastic Container Registry (Amazon ECR) path where inference code is stored.
If you are using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both registry/repository[:tag]
and registry/repository[@digest]
image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker.
The ARN of the image version created on the instance.
" + "documentation":"The ARN of the image version created on the instance. To clear the value set for SageMakerImageVersionArn
, pass None
as the value.
The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.
" } }, - "documentation":"Specifies the ARN's of a SageMaker AI image and SageMaker AI image version, and the instance type that the version runs on.
" + "documentation":"Specifies the ARN's of a SageMaker AI image and SageMaker AI image version, and the instance type that the version runs on.
When both SageMakerImageVersionArn
and SageMakerImageArn
are passed, SageMakerImageVersionArn
is used. Any updates to SageMakerImageArn
will not take effect if SageMakerImageVersionArn
already exists in the ResourceSpec
because SageMakerImageVersionArn
always takes precedence. To clear the value set for SageMakerImageVersionArn
, pass None
as the value.
Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization.
" + "documentation":"Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization.
This operation merges the new data lake organization configuration with the existing configuration for Security Lake in your organization. If you want to create a new data lake organization configuration, you must delete the existing one using DeleteDataLakeOrganizationConfiguration.
" }, "CreateSubscriber":{ "name":"CreateSubscriber", @@ -815,7 +815,7 @@ }, "eventClasses":{ "shape":"OcsfEventClassList", - "documentation":"The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are:
ACCESS_ACTIVITY
FILE_ACTIVITY
KERNEL_ACTIVITY
KERNEL_EXTENSION
MEMORY_ACTIVITY
MODULE_ACTIVITY
PROCESS_ACTIVITY
REGISTRY_KEY_ACTIVITY
REGISTRY_VALUE_ACTIVITY
RESOURCE_ACTIVITY
SCHEDULED_JOB_ACTIVITY
SECURITY_FINDING
ACCOUNT_CHANGE
AUTHENTICATION
AUTHORIZATION
ENTITY_MANAGEMENT_AUDIT
DHCP_ACTIVITY
NETWORK_ACTIVITY
DNS_ACTIVITY
FTP_ACTIVITY
HTTP_ACTIVITY
RDP_ACTIVITY
SMB_ACTIVITY
SSH_ACTIVITY
CONFIG_STATE
INVENTORY_INFO
EMAIL_ACTIVITY
API_ACTIVITY
CLOUD_API
The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. For the list of supported event classes, see the Amazon Security Lake User Guide.
" }, "sourceName":{ "shape":"CustomLogSourceName", @@ -1290,7 +1290,7 @@ }, "eventClasses":{ "shape":"OcsfEventClassList", - "documentation":"The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are:
ACCESS_ACTIVITY
FILE_ACTIVITY
KERNEL_ACTIVITY
KERNEL_EXTENSION
MEMORY_ACTIVITY
MODULE_ACTIVITY
PROCESS_ACTIVITY
REGISTRY_KEY_ACTIVITY
REGISTRY_VALUE_ACTIVITY
RESOURCE_ACTIVITY
SCHEDULED_JOB_ACTIVITY
SECURITY_FINDING
ACCOUNT_CHANGE
AUTHENTICATION
AUTHORIZATION
ENTITY_MANAGEMENT_AUDIT
DHCP_ACTIVITY
NETWORK_ACTIVITY
DNS_ACTIVITY
FTP_ACTIVITY
HTTP_ACTIVITY
RDP_ACTIVITY
SMB_ACTIVITY
SSH_ACTIVITY
CONFIG_STATE
INVENTORY_INFO
EMAIL_ACTIVITY
API_ACTIVITY
CLOUD_API
The Open Cybersecurity Schema Framework (OCSF) event classes describes the type of data that the custom source will send to Security Lake. For the list of supported event classes, see Supported OCSF Event classes in the Amazon Security Lake User Guide.
" }, "sourceName":{ "shape":"String", diff --git a/botocore/data/sesv2/2019-09-27/service-2.json b/botocore/data/sesv2/2019-09-27/service-2.json index 6018ffc67a..f666cd6be5 100644 --- a/botocore/data/sesv2/2019-09-27/service-2.json +++ b/botocore/data/sesv2/2019-09-27/service-2.json @@ -6390,7 +6390,8 @@ "DKIM", "DMARC", "SPF", - "BIMI" + "BIMI", + "COMPLAINT" ] }, "RecommendationsList":{ diff --git a/botocore/data/sns/2010-03-31/service-2.json b/botocore/data/sns/2010-03-31/service-2.json index 8ff9f4cd24..70dd2549b9 100644 --- a/botocore/data/sns/2010-03-31/service-2.json +++ b/botocore/data/sns/2010-03-31/service-2.json @@ -1099,7 +1099,7 @@ }, "Attributes":{ "shape":"TopicAttributesMap", - "documentation":"A map of attributes with their corresponding values.
The following lists names, descriptions, and values of the special request parameters that the CreateTopic
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
FifoTopic
– Set to true to create a FIFO topic.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
The following attributes apply only to FIFO topics:
ArchivePolicy
– The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
A map of attributes with their corresponding values.
The following lists names, descriptions, and values of the special request parameters that the CreateTopic
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
FifoTopic
– Set to true to create a FIFO topic.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
The following attributes apply only to FIFO topics:
ArchivePolicy
– The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
FifoThroughputScope
– Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. This attribute has two possible values:
Topic
– The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first.
MessageGroup
– The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference.
This parameter applies only to FIFO (first-in-first-out) topics.
The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId
is sent successfully, subsequent messages with the same MessageDeduplicationId
are accepted successfully but aren't delivered.
Every message must have a unique MessageDeduplicationId
.
You may provide a MessageDeduplicationId
explicitly.
If you aren't able to provide a MessageDeduplicationId
and you enable ContentBasedDeduplication
for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
If you don't provide a MessageDeduplicationId
and the topic doesn't have ContentBasedDeduplication
set, the action fails with an error.
If the topic has a ContentBasedDeduplication
set, your MessageDeduplicationId
overrides the generated one.
When ContentBasedDeduplication
is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
If you send one message with ContentBasedDeduplication
enabled, and then another message with a MessageDeduplicationId
that is the same as the one generated for the first MessageDeduplicationId
, the two messages are treated as duplicates and only one copy of the message is delivered.
The MessageDeduplicationId
is available to the consumer of the message (this can be useful for troubleshooting delivery issues).
If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId
after the deduplication interval, Amazon SNS can't detect duplicate messages.
Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted.
The length of MessageDeduplicationId
is 128 characters.
MessageDeduplicationId
can contain alphanumeric characters (a-z, A-Z, 0-9)
and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)
.
This parameter applies only to FIFO (first-in-first-out) topics.
This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId
can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9)
and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)
.
Every message must have a unique MessageDeduplicationId
, which is a token used for deduplication of sent messages within the 5 minute minimum deduplication interval.
The scope of deduplication depends on the FifoThroughputScope
attribute, when set to Topic
the message deduplication scope is across the entire topic, when set to MessageGroup
the message deduplication scope is within each individual message group.
If a message with a particular MessageDeduplicationId
is sent successfully, subsequent messages within the deduplication scope and interval, with the same MessageDeduplicationId
, are accepted successfully but aren't delivered.
Every message must have a unique MessageDeduplicationId
.
You may provide a MessageDeduplicationId
explicitly.
If you aren't able to provide a MessageDeduplicationId
and you enable ContentBasedDeduplication
for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
If you don't provide a MessageDeduplicationId
and the topic doesn't have ContentBasedDeduplication
set, the action fails with an error.
If the topic has a ContentBasedDeduplication
set, your MessageDeduplicationId
overrides the generated one.
When ContentBasedDeduplication
is in effect, messages with identical content sent within the deduplication scope and interval are treated as duplicates and only one copy of the message is delivered.
If you send one message with ContentBasedDeduplication
enabled, and then another message with a MessageDeduplicationId
that is the same as the one generated for the first MessageDeduplicationId
, the two messages are treated as duplicates, within the deduplication scope and interval, and only one copy of the message is delivered.
The MessageDeduplicationId
is available to the consumer of the message (this can be useful for troubleshooting delivery issues).
If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId
after the deduplication interval, Amazon SNS can't detect duplicate messages.
Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted.
This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId
can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9)
and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)
.
Every message must have a unique MessageDeduplicationId
, which is a token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId
is sent successfully, any message sent with the same MessageDeduplicationId
during the 5-minute deduplication interval is treated as a duplicate.
If the topic has ContentBasedDeduplication
set, the system generates a MessageDeduplicationId
based on the contents of the message. Your MessageDeduplicationId
overrides the generated one.
This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId
can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9)
and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)
.
Every message must have a unique MessageDeduplicationId
, which is a token used for deduplication of sent messages within the 5 minute minimum deduplication interval.
The scope of deduplication depends on the FifoThroughputScope
attribute, when set to Topic
the message deduplication scope is across the entire topic, when set to MessageGroup
the message deduplication scope is within each individual message group.
If a message with a particular MessageDeduplicationId
is sent successfully, subsequent messages within the deduplication scope and interval, with the same MessageDeduplicationId
, are accepted successfully but aren't delivered.
Every message must have a unique MessageDeduplicationId
:
You may provide a MessageDeduplicationId
explicitly.
If you aren't able to provide a MessageDeduplicationId
and you enable ContentBasedDeduplication
for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
If you don't provide a MessageDeduplicationId
and the topic doesn't have ContentBasedDeduplication
set, the action fails with an error.
If the topic has a ContentBasedDeduplication
set, your MessageDeduplicationId
overrides the generated one.
When ContentBasedDeduplication
is in effect, messages with identical content sent within the deduplication scope and interval are treated as duplicates and only one copy of the message is delivered.
If you send one message with ContentBasedDeduplication
enabled, and then another message with a MessageDeduplicationId
that is the same as the one generated for the first MessageDeduplicationId
, the two messages are treated as duplicates, within the deduplication scope and interval, and only one copy of the message is delivered.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes
action uses:
ApplicationSuccessFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint.
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to the topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
HTTP
HTTPSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
HTTPSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint.
HTTPFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
Amazon Kinesis Data Firehose
FirehoseSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
FirehoseSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
FirehoseFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
Lambda
LambdaSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint.
LambdaSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to a Lambda endpoint.
LambdaFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint.
Platform application endpoint
ApplicationSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
ApplicationSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
ApplicationFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services.
For more information, see Using Amazon SNS Application Attributes for Message Delivery Status.
Amazon SQS
SQSSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
SQSSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
SQSFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
The <ENDPOINT>SuccessFeedbackRoleArn and <ENDPOINT>FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to CloudWatch Logs on your behalf. The <ENDPOINT>SuccessFeedbackSampleRate attribute specifies the sample rate percentage (0-100) of successfully delivered messages. After you configure the <ENDPOINT>FailureFeedbackRoleArn attribute, all failed message deliveries generate CloudWatch Logs.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
The following attribute applies only to FIFO topics:
ArchivePolicy
– The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes
action uses:
ApplicationSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint.
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default, TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to the topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
HTTP
HTTPSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
HTTPSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint.
HTTPFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
Amazon Kinesis Data Firehose
FirehoseSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
FirehoseSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
FirehoseFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
Lambda
LambdaSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint.
LambdaSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to a Lambda endpoint.
LambdaFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint.
Platform application endpoint
ApplicationSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
ApplicationSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
ApplicationFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint.
In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services.
For more information, see Using Amazon SNS Application Attributes for Message Delivery Status.
Amazon SQS
SQSSuccessFeedbackRoleArn
– Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
SQSSuccessFeedbackSampleRate
– Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
SQSFailureFeedbackRoleArn
– Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint.
The <ENDPOINT>SuccessFeedbackRoleArn and <ENDPOINT>FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to CloudWatch Logs on your behalf. The <ENDPOINT>SuccessFeedbackSampleRate attribute specifies the sample rate percentage (0-100) of successfully delivered messages. After you configure the <ENDPOINT>FailureFeedbackRoleArn attribute, all failed message deliveries generate CloudWatch Logs.
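As a rough sketch (not part of the API model itself), delivery status attributes are applied one name/value pair at a time with SetTopicAttributes; the topic and role ARNs here are placeholders:

```python
import boto3

sns = boto3.client("sns")
topic_arn = "arn:aws:sns:us-east-1:123456789012:my-topic"  # placeholder ARN

# SetTopicAttributes accepts one attribute name/value pair per call.
sns.set_topic_attributes(
    TopicArn=topic_arn,
    AttributeName="LambdaSuccessFeedbackRoleArn",
    AttributeValue="arn:aws:iam::123456789012:role/SNSSuccessFeedback",  # placeholder role
)
sns.set_topic_attributes(
    TopicArn=topic_arn,
    AttributeName="LambdaSuccessFeedbackSampleRate",
    AttributeValue="25",  # sample 25% of successful deliveries
)
```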
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
The following attribute applies only to FIFO topics:
ArchivePolicy
– The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
FifoThroughputScope
– Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. This attribute has two possible values:
Topic
– The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first.
MessageGroup
– The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference.
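A hedged boto3 sketch of configuring the FIFO attributes described above on an existing FIFO topic; the topic ARN is a placeholder, and whether MessageGroup scope is appropriate depends on your throughput and ordering needs:

```python
import boto3

sns = boto3.client("sns")
fifo_topic_arn = "arn:aws:sns:us-east-1:123456789012:orders.fifo"  # placeholder ARN

# Derive MessageDeduplicationId from the message body by default.
sns.set_topic_attributes(
    TopicArn=fifo_topic_arn,
    AttributeName="ContentBasedDeduplication",
    AttributeValue="true",
)
# Scope deduplication to each individual message group for higher per-topic throughput.
sns.set_topic_attributes(
    TopicArn=fifo_topic_arn,
    AttributeName="FifoThroughputScope",
    AttributeValue="MessageGroup",
)
```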
Associates a related item to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager and OpsCenter are capabilities of Amazon Web Services Systems Manager.
" + "documentation":"Associates a related item to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager and OpsCenter are tools in Amazon Web Services Systems Manager.
" }, "CancelCommand":{ "name":"CancelCommand", @@ -92,7 +92,7 @@ {"shape":"InvalidParameters"}, {"shape":"InternalServerError"} ], - "documentation":"Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide.
Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes.
Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager tools. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide.
Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes.
A State Manager association defines the state that you want to maintain on your managed nodes. For example, an association can specify that anti-virus software must be installed and running on your managed nodes, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of Amazon Web Services Systems Manager applies the configuration when new managed nodes are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service.
" + "documentation":"A State Manager association defines the state that you want to maintain on your managed nodes. For example, an association can specify that anti-virus software must be installed and running on your managed nodes, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a tool in Amazon Web Services Systems Manager applies the configuration when new managed nodes are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service.
" }, "CreateAssociationBatch":{ "name":"CreateAssociationBatch", @@ -985,7 +985,7 @@ {"shape":"OpsItemInvalidParameterException"}, {"shape":"OpsItemConflictException"} ], - "documentation":"Deletes the association between an OpsItem and a related item. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a capability of Amazon Web Services Systems Manager.
" + "documentation":"Deletes the association between an OpsItem and a related item. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a tool in Amazon Web Services Systems Manager.
" }, "GetAutomationExecution":{ "name":"GetAutomationExecution", @@ -1015,7 +1015,7 @@ {"shape":"InvalidDocumentType"}, {"shape":"UnsupportedCalendarException"} ], - "documentation":"Gets the state of a Amazon Web Services Systems Manager change calendar at the current time or a specified time. If you specify a time, GetCalendarState
returns the state of the calendar at that specific time, and returns the next time that the change calendar state will transition. If you don't specify a time, GetCalendarState
uses the current time. Change Calendar entries have two possible states: OPEN
or CLOSED
.
If you specify more than one calendar in a request, the command returns the status of OPEN
only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED
.
For more information about Change Calendar, a capability of Amazon Web Services Systems Manager, see Amazon Web Services Systems Manager Change Calendar in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"Gets the state of a Amazon Web Services Systems Manager change calendar at the current time or a specified time. If you specify a time, GetCalendarState
returns the state of the calendar at that specific time, and returns the next time that the change calendar state will transition. If you don't specify a time, GetCalendarState
uses the current time. Change Calendar entries have two possible states: OPEN
or CLOSED
.
If you specify more than one calendar in a request, the command returns the status of OPEN
only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED
.
For more information about Change Calendar, a tool in Amazon Web Services Systems Manager, see Amazon Web Services Systems Manager Change Calendar in the Amazon Web Services Systems Manager User Guide.
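As an illustrative sketch, the calendar state described above can be read with GetCalendarState in boto3; the calendar document name and timestamp are placeholders, and AtTime defaults to the current time when omitted:

```python
import boto3

ssm = boto3.client("ssm")

# Placeholder calendar document name and time.
state = ssm.get_calendar_state(
    CalendarNames=["MyChangeCalendar"],
    AtTime="2025-01-15T00:00:00Z",
)
print(state["State"])                   # "OPEN" or "CLOSED"
print(state.get("NextTransitionTime"))  # when the calendar next changes state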
" }, "GetCommandInvocation":{ "name":"GetCommandInvocation", @@ -1073,7 +1073,7 @@ {"shape":"UnsupportedOperatingSystem"}, {"shape":"UnsupportedFeatureRequiredException"} ], - "documentation":"Retrieves the current snapshot for the patch baseline the managed node uses. This API is primarily used by the AWS-RunPatchBaseline
Systems Manager document (SSM document).
If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a capability of Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command. For example, run the command using the AWS-RunShellScript
document or the AWS-RunPowerShellScript
document.
Retrieves the current snapshot for the patch baseline the managed node uses. This API is primarily used by the AWS-RunPatchBaseline
Systems Manager document (SSM document).
If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a tool in Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command. For example, run the command using the AWS-RunShellScript
document or the AWS-RunPowerShellScript
document.
Retrieve information about one or more parameters in a specific hierarchy.
Request results are returned on a best-effort basis. If you specify MaxResults
in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults
. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken
. You can specify the NextToken
in a subsequent call to get the next set of results.
Retrieve information about one or more parameters under a specified level in a hierarchy.
Request results are returned on a best-effort basis. If you specify MaxResults
in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults
. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken
. You can specify the NextToken
in a subsequent call to get the next set of results.
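For illustration, a short boto3 sketch of paging through GetParametersByPath results; the built-in paginator resubmits NextToken for you, and the parameter hierarchy shown is a placeholder:

```python
import boto3

ssm = boto3.client("ssm")

# The paginator follows NextToken until all matching parameters are returned.
paginator = ssm.get_paginator("get_parameters_by_path")
for page in paginator.paginate(
    Path="/my-app/prod/",               # placeholder hierarchy
    Recursive=True,
    WithDecryption=True,
    PaginationConfig={"PageSize": 10},  # maps to MaxResults per request
):
    for parameter in page["Parameters"]:
        print(parameter["Name"], parameter["Value"])
```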
Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You can limit the results to a specific State Manager association document or managed node by specifying a filter. State Manager is a capability of Amazon Web Services Systems Manager.
" + "documentation":"Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You can limit the results to a specific State Manager association document or managed node by specifying a filter. State Manager is a tool in Amazon Web Services Systems Manager.
" }, "ListCommandInvocations":{ "name":"ListCommandInvocations", @@ -1615,7 +1615,7 @@ {"shape":"InternalServerError"}, {"shape":"OpsItemInvalidParameterException"} ], - "documentation":"Lists all related-item resources associated with a Systems Manager OpsCenter OpsItem. OpsCenter is a capability of Amazon Web Services Systems Manager.
" + "documentation":"Lists all related-item resources associated with a Systems Manager OpsCenter OpsItem. OpsCenter is a tool in Amazon Web Services Systems Manager.
" }, "ListOpsMetadata":{ "name":"ListOpsMetadata", @@ -2675,7 +2675,7 @@ }, "AutomationTargetParameterName":{ "shape":"AutomationTargetParameterName", - "documentation":"Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.
" + "documentation":"Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager.
" }, "Parameters":{ "shape":"Parameters", @@ -2723,7 +2723,7 @@ }, "SyncCompliance":{ "shape":"AssociationSyncCompliance", - "documentation":"The mode for generating association compliance. You can specify AUTO
or MANUAL
. In AUTO
mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT
. If the association execution doesn't run successfully, the association is NON-COMPLIANT
.
In MANUAL
mode, you must specify the AssociationId
as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.
By default, all associations use AUTO
mode.
The mode for generating association compliance. You can specify AUTO
or MANUAL
. In AUTO
mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT
. If the association execution doesn't run successfully, the association is NON-COMPLIANT
.
In MANUAL
mode, you must specify the AssociationId
as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.
By default, all associations use AUTO
mode.
The mode for generating association compliance. You can specify AUTO
or MANUAL
. In AUTO
mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT
. If the association execution doesn't run successfully, the association is NON-COMPLIANT
.
In MANUAL
mode, you must specify the AssociationId
as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.
By default, all associations use AUTO
mode.
The mode for generating association compliance. You can specify AUTO
or MANUAL
. In AUTO
mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT
. If the association execution doesn't run successfully, the association is NON-COMPLIANT
.
In MANUAL
mode, you must specify the AssociationId
as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.
By default, all associations use AUTO
mode.
The Identity and Access Management (IAM) service role that Run Command, a capability of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes.
" + "documentation":"The Identity and Access Management (IAM) service role that Run Command, a tool in Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes.
" }, "NotificationConfig":{ "shape":"NotificationConfig", @@ -4194,7 +4194,7 @@ }, "ServiceRole":{ "shape":"ServiceRole", - "documentation":"The Identity and Access Management (IAM) service role that Run Command, a capability of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes on a per managed node basis.
" + "documentation":"The Identity and Access Management (IAM) service role that Run Command, a tool in Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes on a per managed node basis.
" }, "NotificationConfig":{ "shape":"NotificationConfig", @@ -4684,7 +4684,7 @@ }, "AutomationTargetParameterName":{ "shape":"AutomationTargetParameterName", - "documentation":"Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.
" + "documentation":"Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager.
" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -4720,7 +4720,7 @@ }, "SyncCompliance":{ "shape":"AssociationSyncCompliance", - "documentation":"The mode for generating association compliance. You can specify AUTO
or MANUAL
. In AUTO
mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT
. If the association execution doesn't run successfully, the association is NON-COMPLIANT
.
In MANUAL
mode, you must specify the AssociationId
as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.
By default, all associations use AUTO
mode.
The mode for generating association compliance. You can specify AUTO
or MANUAL
. In AUTO
mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT
. If the association execution doesn't run successfully, the association is NON-COMPLIANT
.
In MANUAL
mode, you must specify the AssociationId
as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.
By default, all associations use AUTO
mode.
Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.
" + "documentation":"Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager.
" }, "MaxErrors":{ "shape":"MaxErrors", @@ -6685,7 +6685,7 @@ }, "InstancesWithUnreportedNotApplicablePatches":{ "shape":"Integer", - "documentation":"The number of managed nodes with NotApplicable
patches beyond the supported limit, which aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.
The number of managed nodes with NotApplicable
patches beyond the supported limit, which aren't reported by name to Inventory. Inventory is a tool in Amazon Web Services Systems Manager.
The number of patches beyond the supported limit of NotApplicableCount
that aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.
The number of patches beyond the supported limit of NotApplicableCount
that aren't reported by name to Inventory. Inventory is a tool in Amazon Web Services Systems Manager.
The type of policy. Parameter Store, a capability of Amazon Web Services Systems Manager, supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification.
" + "documentation":"The type of policy. Parameter Store, a tool in Amazon Web Services Systems Manager, supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification.
" }, "PolicyStatus":{ "shape":"String", @@ -14563,7 +14563,7 @@ }, "Value":{ "shape":"PSParameterValue", - "documentation":"The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB.
Parameters can't be referenced or nested in the values of other parameters. You can't include {{}}
or {{ssm:parameter-name}}
in a parameter value.
The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB.
Parameters can't be referenced or nested in the values of other parameters. You can't include values wrapped in double brackets {{}}
or {{ssm:parameter-name}}
in a parameter value.
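A minimal boto3 sketch of a PutParameter call under these constraints; the parameter name and value are placeholders:

```python
import boto3

ssm = boto3.client("ssm")

# Placeholder name and value; the value must not embed {{...}} parameter references.
ssm.put_parameter(
    Name="/my-app/prod/db-host",
    Value="db.internal.example.com",
    Type="String",
    Overwrite=True,
)
```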
One or more policies to apply to a parameter. This operation takes a JSON array. Parameter Store, a capability of Amazon Web Services Systems Manager supports the following policy types:
Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter doesn't affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter.
ExpirationNotification: This policy initiates an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours.
NoChangeNotification: This policy initiates a CloudWatch Events event if a parameter hasn't been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it hasn't been changed.
All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies.
" + "documentation":"One or more policies to apply to a parameter. This operation takes a JSON array. Parameter Store, a tool in Amazon Web Services Systems Manager supports the following policy types:
Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter doesn't affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter.
ExpirationNotification: This policy initiates an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours.
NoChangeNotification: This policy initiates a CloudWatch Events event if a parameter hasn't been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it hasn't been changed.
All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies.
" }, "DataType":{ "shape":"ParameterDataType", @@ -15729,7 +15729,7 @@ }, "CloudWatchOutputConfig":{ "shape":"CloudWatchOutputConfig", - "documentation":"Enables Amazon Web Services Systems Manager to send Run Command output to Amazon CloudWatch Logs. Run Command is a capability of Amazon Web Services Systems Manager.
" + "documentation":"Enables Amazon Web Services Systems Manager to send Run Command output to Amazon CloudWatch Logs. Run Command is a tool in Amazon Web Services Systems Manager.
" }, "AlarmConfiguration":{ "shape":"AlarmConfiguration", @@ -16123,7 +16123,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs:
Key=environment,Value=test
Key=OS,Value=Windows
To add tags to an existing automation, use the AddTagsToResource operation.
Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs:
Key=environment,Value=test
Key=OS,Value=Windows
The Array Members
maximum value is reported as 1000. This number includes capacity reserved for internal operations. When calling the StartAutomationExecution
action, you can specify a maximum of 5 tags. You can, however, use the AddTagsToResource action to add up to a total of 50 tags to an existing automation configuration.
Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs:
Key=Environment,Value=Production
Key=Region,Value=us-east-2
Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs:
Key=Environment,Value=Production
Key=Region,Value=us-east-2
The Array Members
maximum value is reported as 1000. This number includes capacity reserved for internal operations. When calling the StartChangeRequestExecution
action, you can specify a maximum of 5 tags. You can, however, use the AddTagsToResource action to add up to a total of 50 tags to an existing change request configuration.
The values you want to specify for the parameters defined in the Session document.
" + "documentation":"The values you want to specify for the parameters defined in the Session document. For more information about these parameters, see Create a Session Manager preferences document in the Amazon Web Services Systems Manager User Guide.
" } } }, @@ -16553,7 +16553,7 @@ "documentation":"User-defined criteria that maps to Key
. For example, if you specified tag:ServerRole
, you could specify value:WebServer
to run a command on instances that include EC2 tags of ServerRole,WebServer
.
Depending on the type of target, the maximum number of values for a key might be lower than the global maximum of 50.
" } }, - "documentation":"An array of search criteria that targets managed nodes using a key-value pair that you specify.
One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide.
Supported formats include the following.
For all Systems Manager capabilities:
Key=tag-key,Values=tag-value-1,tag-value-2
For Automation and Change Manager:
Key=tag:tag-key,Values=tag-value
Key=ResourceGroup,Values=resource-group-name
Key=ParameterValues,Values=value-1,value-2,value-3
To target all instances in the Amazon Web Services Region:
Key=AWS::EC2::Instance,Values=*
Key=InstanceIds,Values=*
For Run Command and Maintenance Windows:
Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
Key=tag:tag-key,Values=tag-value-1,tag-value-2
Key=resource-groups:Name,Values=resource-group-name
Additionally, Maintenance Windows support targeting resource types:
Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2
For State Manager:
Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
Key=tag:tag-key,Values=tag-value-1,tag-value-2
To target all instances in the Amazon Web Services Region:
Key=InstanceIds,Values=*
For more information about how to send commands that target managed nodes using Key,Value
parameters, see Targeting multiple managed nodes in the Amazon Web Services Systems Manager User Guide.
An array of search criteria that targets managed nodes using a key-value pair that you specify.
One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide.
Supported formats include the following.
For all Systems Manager tools:
Key=tag-key,Values=tag-value-1,tag-value-2
For Automation and Change Manager:
Key=tag:tag-key,Values=tag-value
Key=ResourceGroup,Values=resource-group-name
Key=ParameterValues,Values=value-1,value-2,value-3
To target all instances in the Amazon Web Services Region:
Key=AWS::EC2::Instance,Values=*
Key=InstanceIds,Values=*
For Run Command and Maintenance Windows:
Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
Key=tag:tag-key,Values=tag-value-1,tag-value-2
Key=resource-groups:Name,Values=resource-group-name
Additionally, Maintenance Windows support targeting resource types:
Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2
For State Manager:
Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
Key=tag:tag-key,Values=tag-value-1,tag-value-2
To target all instances in the Amazon Web Services Region:
Key=InstanceIds,Values=*
For more information about how to send commands that target managed nodes using Key,Value
parameters, see Targeting multiple managed nodes in the Amazon Web Services Systems Manager User Guide.
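As an illustrative sketch, each of the Key=...,Values=... formats above corresponds to one {"Key": ..., "Values": [...]} entry in the Targets list accepted by operations such as SendCommand in boto3; the tag key and command below are placeholders:

```python
import boto3

ssm = boto3.client("ssm")

# Placeholder tag-based target; swap in any of the supported Key/Values formats.
response = ssm.send_command(
    DocumentName="AWS-RunShellScript",
    Targets=[{"Key": "tag:ServerRole", "Values": ["WebServer"]}],
    Parameters={"commands": ["uptime"]},
)
print(response["Command"]["CommandId"])
```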
The parameters you want to update for the association. If you create a parameter using Parameter Store, a capability of Amazon Web Services Systems Manager, you can reference the parameter using {{ssm:parameter-name}}
.
The parameters you want to update for the association. If you create a parameter using Parameter Store, a tool in Amazon Web Services Systems Manager, you can reference the parameter using {{ssm:parameter-name}}
.
Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.
" + "documentation":"Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager.
" }, "MaxErrors":{ "shape":"MaxErrors", @@ -16928,7 +16928,7 @@ }, "SyncCompliance":{ "shape":"AssociationSyncCompliance", - "documentation":"The mode for generating association compliance. You can specify AUTO
or MANUAL
. In AUTO
mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT
. If the association execution doesn't run successfully, the association is NON-COMPLIANT
.
In MANUAL
mode, you must specify the AssociationId
as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.
By default, all associations use AUTO
mode.
The mode for generating association compliance. You can specify AUTO
or MANUAL
. In AUTO
mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT
. If the association execution doesn't run successfully, the association is NON-COMPLIANT
.
In MANUAL
mode, you must specify the AssociationId
as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.
By default, all associations use AUTO
mode.
Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure end-to-end management solution for hybrid cloud environments that enables safe and secure operations at scale.
This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager.
Related resources
For information about each of the capabilities that comprise Systems Manager, see Systems Manager capabilities in the Amazon Web Services Systems Manager User Guide.
For details about predefined runbooks for Automation, a capability of Amazon Web Services Systems Manager, see the Systems Manager Automation runbook reference .
For information about AppConfig, a capability of Systems Manager, see the AppConfig User Guide and the AppConfig API Reference .
For information about Incident Manager, a capability of Systems Manager, see the Systems Manager Incident Manager User Guide and the Systems Manager Incident Manager API Reference .
Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure end-to-end management solution for hybrid cloud environments that enables safe and secure operations at scale.
This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager.
Related resources
For information about each of the tools that comprise Systems Manager, see Using Systems Manager tools in the Amazon Web Services Systems Manager User Guide.
For details about predefined runbooks for Automation, a tool in Amazon Web Services Systems Manager, see the Systems Manager Automation runbook reference .
For information about AppConfig, a tool in Systems Manager, see the AppConfig User Guide and the AppConfig API Reference .
For information about Incident Manager, a tool in Systems Manager, see the Systems Manager Incident Manager User Guide and the Systems Manager Incident Manager API Reference .
Creates and returns access and refresh tokens for clients that are authenticated using client secrets. The access token can be used to fetch short-term credentials for the assigned AWS accounts or to access application APIs using bearer
authentication.
Creates and returns access and refresh tokens for clients that are authenticated using client secrets. The access token can be used to fetch short-lived credentials for the assigned AWS accounts or to access application APIs using bearer
authentication.
Creates and returns access and refresh tokens for clients and applications that are authenticated using IAM entities. The access token can be used to fetch short-term credentials for the assigned Amazon Web Services accounts or to access application APIs using bearer
authentication.
Creates and returns access and refresh tokens for clients and applications that are authenticated using IAM entities. The access token can be used to fetch short-lived credentials for the assigned Amazon Web Services accounts or to access application APIs using bearer
authentication.
Registers a client with IAM Identity Center. This allows clients to initiate device authorization. The output should be persisted for reuse through many authentication requests.
", - "authtype":"none" + "documentation":"Registers a public client with IAM Identity Center. This allows clients to perform authorization using the authorization code grant with Proof Key for Code Exchange (PKCE) or the device code grant.
", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "StartDeviceAuthorization":{ "name":"StartDeviceAuthorization", @@ -97,7 +100,8 @@ {"shape":"InternalServerException"} ], "documentation":"Initiates device authorization by requesting a pair of verification codes from the authorization service.
", - "authtype":"none" + "authtype":"none", + "auth":["smithy.api#noAuth"] } }, "shapes":{ @@ -172,19 +176,19 @@ }, "grantType":{ "shape":"GrantType", - "documentation":"Supports the following OAuth grant types: Device Code and Refresh Token. Specify either of the following values, depending on the grant type that you want:
* Device Code - urn:ietf:params:oauth:grant-type:device_code
* Refresh Token - refresh_token
For information about how to obtain the device code, see the StartDeviceAuthorization topic.
" + "documentation":"Supports the following OAuth grant types: Authorization Code, Device Code, and Refresh Token. Specify one of the following values, depending on the grant type that you want:
* Authorization Code - authorization_code
* Device Code - urn:ietf:params:oauth:grant-type:device_code
* Refresh Token - refresh_token
Used only when calling this API for the Device Code grant type. This short-term code is used to identify this authorization request. This comes from the result of the StartDeviceAuthorization API.
" + "documentation":"Used only when calling this API for the Device Code grant type. This short-lived code is used to identify this authorization request. This comes from the result of the StartDeviceAuthorization API.
" }, "code":{ "shape":"AuthCode", - "documentation":"Used only when calling this API for the Authorization Code grant type. The short-term code is used to identify this authorization request. This grant type is currently unsupported for the CreateToken API.
" + "documentation":"Used only when calling this API for the Authorization Code grant type. The short-lived code is used to identify this authorization request.
" }, "refreshToken":{ "shape":"RefreshToken", - "documentation":"Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-term tokens, such as the access token, that might expire.
For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference.
" + "documentation":"Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-lived tokens, such as the access token, that might expire.
For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference.
" }, "scope":{ "shape":"Scopes", @@ -242,11 +246,11 @@ }, "code":{ "shape":"AuthCode", - "documentation":"Used only when calling this API for the Authorization Code grant type. This short-term code is used to identify this authorization request. The code is obtained through a redirect from IAM Identity Center to a redirect URI persisted in the Authorization Code GrantOptions for the application.
" + "documentation":"Used only when calling this API for the Authorization Code grant type. This short-lived code is used to identify this authorization request. The code is obtained through a redirect from IAM Identity Center to a redirect URI persisted in the Authorization Code GrantOptions for the application.
" }, "refreshToken":{ "shape":"RefreshToken", - "documentation":"Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-term tokens, such as the access token, that might expire.
For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference.
" + "documentation":"Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-lived tokens, such as the access token, that might expire.
For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference.
" }, "assertion":{ "shape":"Assertion", @@ -514,7 +518,7 @@ }, "grantTypes":{ "shape":"GrantTypes", - "documentation":"The list of OAuth 2.0 grant types that are defined by the client. This list is used to restrict the token granting flows available to the client.
" + "documentation":"The list of OAuth 2.0 grant types that are defined by the client. This list is used to restrict the token granting flows available to the client. Supports the following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh Token.
* Authorization Code - authorization_code
* Device Code - urn:ietf:params:oauth:grant-type:device_code
* Refresh Token - refresh_token
IAM Identity Center OpenID Connect (OIDC) is a web service that enables a client (such as CLI or a native application) to register with IAM Identity Center. The service also enables the client to fetch the user’s access token upon successful authentication and authorization with IAM Identity Center.
IAM Identity Center uses the sso
and identitystore
API namespaces.
Considerations for Using This Guide
Before you begin using this guide, we recommend that you first review the following important information about how the IAM Identity Center OIDC service works.
The IAM Identity Center OIDC service currently implements only the portions of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628) that are necessary to enable single sign-on authentication with the CLI.
With older versions of the CLI, the service only emits OIDC access tokens, so to obtain a new token, users must explicitly re-authenticate. To access the OIDC flow that supports token refresh and doesn’t require re-authentication, update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with support for OIDC token refresh and configurable IAM Identity Center session durations. For more information, see Configure Amazon Web Services access portal session duration .
The access tokens provided by this service grant access to all Amazon Web Services account entitlements assigned to an IAM Identity Center user, not just a particular application.
The documentation in this guide does not describe the mechanism to convert the access token into Amazon Web Services Auth (“sigv4”) credentials for use with IAM-protected Amazon Web Services service endpoints. For more information, see GetRoleCredentials in the IAM Identity Center Portal API Reference Guide.
For general information about IAM Identity Center, see What is IAM Identity Center? in the IAM Identity Center User Guide.
" + "documentation":"IAM Identity Center OpenID Connect (OIDC) is a web service that enables a client (such as CLI or a native application) to register with IAM Identity Center. The service also enables the client to fetch the user’s access token upon successful authentication and authorization with IAM Identity Center.
API namespaces
IAM Identity Center uses the sso
and identitystore
API namespaces. IAM Identity Center OpenID Connect uses the sso-oidc
namespace.
Considerations for using this guide
Before you begin using this guide, we recommend that you first review the following important information about how the IAM Identity Center OIDC service works.
The IAM Identity Center OIDC service currently implements only the portions of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628) that are necessary to enable single sign-on authentication with the CLI.
With older versions of the CLI, the service only emits OIDC access tokens, so to obtain a new token, users must explicitly re-authenticate. To access the OIDC flow that supports token refresh and doesn’t require re-authentication, update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with support for OIDC token refresh and configurable IAM Identity Center session durations. For more information, see Configure Amazon Web Services access portal session duration .
The access tokens provided by this service grant access to all Amazon Web Services account entitlements assigned to an IAM Identity Center user, not just a particular application.
The documentation in this guide does not describe the mechanism to convert the access token into Amazon Web Services Auth (“sigv4”) credentials for use with IAM-protected Amazon Web Services service endpoints. For more information, see GetRoleCredentials in the IAM Identity Center Portal API Reference Guide.
For general information about IAM Identity Center, see What is IAM Identity Center? in the IAM Identity Center User Guide.
" } diff --git a/botocore/data/sts/2011-06-15/service-2.json b/botocore/data/sts/2011-06-15/service-2.json index 59c22eb1c8..a3fabadccb 100644 --- a/botocore/data/sts/2011-06-15/service-2.json +++ b/botocore/data/sts/2011-06-15/service-2.json @@ -92,7 +92,7 @@ {"shape":"RegionDisabledException"}, {"shape":"ExpiredTokenException"} ], - "documentation":"Returns a set of short term credentials you can use to perform privileged tasks in a member account.
Before you can launch a privileged session, you must have enabled centralized root access in your organization. For steps to enable this feature, see Centralize root access for member accounts in the IAM User Guide.
The global endpoint is not supported for AssumeRoot. You must send this request to a Regional STS endpoint. For more information, see Endpoints.
You can track AssumeRoot in CloudTrail logs to determine what actions were performed in a session. For more information, see Track privileged tasks in CloudTrail in the IAM User Guide.
" + "documentation":"Returns a set of short term credentials you can use to perform privileged tasks on a member account in your organization.
Before you can launch a privileged session, you must have centralized root access in your organization. For steps to enable this feature, see Centralize root access for member accounts in the IAM User Guide.
The STS global endpoint is not supported for AssumeRoot. You must send this request to a Regional STS endpoint. For more information, see Endpoints.
You can track AssumeRoot in CloudTrail logs to determine what actions were performed in a session. For more information, see Track privileged tasks in CloudTrail in the IAM User Guide.
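Assuming a botocore release that includes the AssumeRoot operation, a hedged sketch of launching a privileged session against a Regional STS endpoint looks like this; the member account ID and task policy ARN are placeholders.

import boto3

# AssumeRoot is not supported on the global endpoint, so pin a Region.
sts = boto3.client("sts", region_name="us-east-1")

response = sts.assume_root(
    TargetPrincipal="111122223333",  # placeholder member account ID
    TaskPolicyArn={
        # Illustrative AWS managed task policy; substitute the policy for your task.
        "arn": "arn:aws:iam::aws:policy/root-task/IAMAuditRootUserCredentials"
    },
    DurationSeconds=900,
)

credentials = response["Credentials"]
print("Privileged session expires at", credentials["Expiration"])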
" }, "DecodeAuthorizationMessage":{ "name":"DecodeAuthorizationMessage", @@ -221,7 +221,7 @@ }, "SourceIdentity":{ "shape":"sourceIdentityType", - "documentation":"The source identity specified by the principal that is calling the AssumeRole
operation. The source identity value persists across chained role sessions.
You can require users to specify a source identity when they assume a role. You do this by using the sts:SourceIdentity condition key in a role trust policy. You can use source identity information in CloudTrail logs to determine who took actions with a role. You can use the aws:SourceIdentity condition key to further control access to Amazon Web Services resources based on the value of source identity. For more information about using source identity, see Monitor and control actions taken with assumed roles in the IAM User Guide.
The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@-. You cannot use a value that begins with the text aws:. This prefix is reserved for Amazon Web Services internal use.
The source identity specified by the principal that is calling the AssumeRole operation. The source identity value persists across chained role sessions.
You can require users to specify a source identity when they assume a role. You do this by using the sts:SourceIdentity condition key in a role trust policy. You can use source identity information in CloudTrail logs to determine who took actions with a role. You can use the aws:SourceIdentity condition key to further control access to Amazon Web Services resources based on the value of source identity. For more information about using source identity, see Monitor and control actions taken with assumed roles in the IAM User Guide.
The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: +=,.@-. You cannot use a value that begins with the text aws:. This prefix is reserved for Amazon Web Services internal use.
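For illustration, a small client-side check that mirrors the character rules stated above before passing SourceIdentity to AssumeRole; the role ARN, session name, and the exact regex are assumptions inferred from this description.

import re

import boto3

# Inferred from the documented rules: alphanumerics plus _ + = , . @ -
SOURCE_IDENTITY_PATTERN = re.compile(r"^[\w+=,.@-]+$")

def validate_source_identity(value):
    if value.startswith("aws:"):
        raise ValueError("Values beginning with 'aws:' are reserved for AWS use.")
    if not SOURCE_IDENTITY_PATTERN.match(value):
        raise ValueError(f"Invalid SourceIdentity value: {value!r}")
    return value

sts = boto3.client("sts")
response = sts.assume_role(
    RoleArn="arn:aws:iam::111122223333:role/ExampleRole",  # placeholder
    RoleSessionName="example-session",
    SourceIdentity=validate_source_identity("DiegoRamirez"),
)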
The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call. Timestamps in the token must be formatted as either an integer or a long integer. Only tokens with RSA algorithms (RS256) are supported.
The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call. Timestamps in the token must be formatted as either an integer or a long integer. Tokens must be signed using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or ES512).
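A sketch of supplying such a token to AssumeRoleWithWebIdentity; the role ARN and token file path are placeholders, and obtaining the token from the identity provider is outside the scope of the snippet.

import boto3

# AssumeRoleWithWebIdentity is an unsigned call, so no AWS credentials are needed here.
sts = boto3.client("sts")

# The token must be signed with one of the algorithms listed above
# (RS256/RS384/RS512 or ES256/ES384/ES512).
with open("/tmp/oidc-token.jwt") as f:  # placeholder token location
    web_identity_token = f.read().strip()

response = sts.assume_role_with_web_identity(
    RoleArn="arn:aws:iam::111122223333:role/WebIdentityRole",  # placeholder
    RoleSessionName="web-identity-session",
    WebIdentityToken=web_identity_token,
    DurationSeconds=3600,
)
print(response["Credentials"]["AccessKeyId"])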
The identity-based policy that scopes the session to the privileged tasks that can be performed. You can use one of the following Amazon Web Services managed policies to scope root session actions. You can add additional customer managed policies to further limit the permissions for the root session.
The identity-based policy that scopes the session to the privileged tasks that can be performed. You can use one of the following Amazon Web Services managed policies to scope root session actions.
Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability.
" + }, + "dbStorageType":{ + "shape":"DbStorageType", + "documentation":"The Timestream for InfluxDB DB storage type that InfluxDB stores data on.
" + }, + "allocatedStorage":{ + "shape":"AllocatedStorage", + "documentation":"The amount of storage to allocate for your DB storage type (in gibibytes).
" } } }, diff --git a/botocore/data/transcribe/2017-10-26/service-2.json b/botocore/data/transcribe/2017-10-26/service-2.json index 85c25d8ab8..45f7eb9efa 100644 --- a/botocore/data/transcribe/2017-10-26/service-2.json +++ b/botocore/data/transcribe/2017-10-26/service-2.json @@ -11,7 +11,8 @@ "signatureVersion":"v4", "signingName":"transcribe", "targetPrefix":"Transcribe", - "uid":"transcribe-2017-10-26" + "uid":"transcribe-2017-10-26", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateCallAnalyticsCategory":{ @@ -805,6 +806,10 @@ "ChannelDefinitions":{ "shape":"ChannelDefinitions", "documentation":"Indicates which speaker is on which channel.
" + }, + "Tags":{ + "shape":"TagList", + "documentation":"The tags, each in the form of a key:value pair, assigned to the specified call analytics job.
" } }, "documentation":"Provides detailed information about a Call Analytics job.
To view the job's status, refer to CallAnalyticsJobStatus. If the status is COMPLETED, the job is finished. You can find your completed transcript at the URI specified in TranscriptFileUri. If the status is FAILED, FailureReason provides details on why your transcription job failed.
If you enabled personally identifiable information (PII) redaction, the redacted transcript appears at the location specified in RedactedTranscriptFileUri.
If you chose to redact the audio in your media file, you can find your redacted media file at the location specified in the RedactedMediaFileUri field of your response.
The date and time the specified Call Analytics category was last updated.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, 2022-05-05T12:45:32.691000-07:00 represents 12:45 PM UTC-7 on May 5, 2022.
The tags, each in the form of a key:value pair, assigned to the specified call analytics category.
" + }, "InputType":{ "shape":"InputType", "documentation":"The input type associated with the specified category. POST_CALL
refers to a category that is applied to batch transcriptions; REAL_TIME refers to a category that is applied to streaming transcriptions.
Rules define a Call Analytics category. When creating a new category, you must create between 1 and 20 rules for that category. For each rule, you specify a filter you want applied to the attributes of a call. For example, you can choose a sentiment filter that detects if a customer's sentiment was positive during the last 30 seconds of the call.
" }, + "Tags":{ + "shape":"TagList", + "documentation":"Adds one or more custom tags, each in the form of a key:value pair, to a new call analytics category at the time you start this new job.
To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
" + }, "InputType":{ "shape":"InputType", "documentation":"Choose whether you want to create a real-time or a post-call category for your Call Analytics transcription.
Specifying POST_CALL assigns your category to post-call transcriptions; categories with this input type cannot be applied to streaming (real-time) transcriptions.
Specifying REAL_TIME assigns your category to streaming transcriptions; categories with this input type cannot be applied to post-call transcriptions.
If you do not include InputType, your category is created as a post-call category by default.
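As a hedged sketch of the category behavior described above, the following creates a post-call category with one sentiment rule and the newly supported Tags parameter; the category name, rule values, and tag are illustrative.

import boto3

transcribe = boto3.client("transcribe")

transcribe.create_call_analytics_category(
    CategoryName="positive-customer-close",  # illustrative name
    Rules=[
        {
            # Flag calls where the customer sounds positive near the end of the call.
            "SentimentFilter": {
                "Sentiments": ["POSITIVE"],
                "ParticipantRole": "CUSTOMER",
                "RelativeTimeRange": {"StartPercentage": 80, "EndPercentage": 100},
            }
        }
    ],
    InputType="POST_CALL",
    Tags=[{"Key": "department", "Value": "support"}],
)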
Specify additional optional settings in your request, including content redaction; these settings allow you to apply custom language models, vocabulary filters, and custom vocabularies to your Call Analytics job.
" }, + "Tags":{ + "shape":"TagList", + "documentation":"Adds one or more custom tags, each in the form of a key:value pair, to a new call analytics job at the time you start this new job.
To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
" + }, "ChannelDefinitions":{ "shape":"ChannelDefinitions", "documentation":"Makes it possible to specify which speaker is on which channel. For example, if your agent is the first participant to speak, you would set ChannelId
to 0 (to indicate the first channel) and ParticipantRole to AGENT (to indicate that it's the agent speaking).
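A sketch that combines the ChannelDefinitions mapping described above with the newly supported Tags parameter on StartCallAnalyticsJob; the job name, media URI, and role ARN are placeholders.

import boto3

transcribe = boto3.client("transcribe")

transcribe.start_call_analytics_job(
    CallAnalyticsJobName="example-call-analytics-job",  # placeholder
    Media={"MediaFileUri": "s3://amzn-s3-demo-bucket/calls/call-001.wav"},  # placeholder
    DataAccessRoleArn="arn:aws:iam::111122223333:role/TranscribeDataAccess",  # placeholder
    ChannelDefinitions=[
        {"ChannelId": 0, "ParticipantRole": "AGENT"},
        {"ChannelId": 1, "ParticipantRole": "CUSTOMER"},
    ],
    Tags=[{"Key": "team", "Value": "support"}],
)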
Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership, between a Transfer Family server and an AS2 process. The agreement defines the file and message transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family combines a server, local profile, partner profile, certificate, and other attributes.
The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId.
Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership, between a Transfer Family server and an AS2 process. The agreement defines the file and message transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family combines a server, local profile, partner profile, certificate, and other attributes.
The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId.
Specify either BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail.
Updates some of the parameters for an existing agreement. Provide the AgreementId and the ServerId for the agreement that you want to update, along with the new values for the parameters to update.
Updates some of the parameters for an existing agreement. Provide the AgreementId and the ServerId for the agreement that you want to update, along with the new values for the parameters to update.
Specify either BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail.
If you update an agreement from using base directory to custom directories, the base directory is no longer used. Similarly, if you change from custom directories to a base directory, the custom directories are no longer used.
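A sketch of moving an existing agreement to the custom directories described above; the server ID, agreement ID, and S3 paths are placeholders, and the top-level parameter name CustomDirectories is assumed from the CustomDirectoriesType structure added in this change.

import boto3

transfer = boto3.client("transfer")

transfer.update_agreement(
    ServerId="s-1234567890abcdef0",     # placeholder
    AgreementId="a-1234567890abcdef0",  # placeholder
    # Once custom directories are set, any previous BaseDirectory is no longer used.
    CustomDirectories={
        "FailedFilesDirectory": "/amzn-s3-demo-bucket/as2/failed",
        "MdnFilesDirectory": "/amzn-s3-demo-bucket/as2/mdn",
        "PayloadFilesDirectory": "/amzn-s3-demo-bucket/as2/payload",
        "StatusFilesDirectory": "/amzn-s3-demo-bucket/as2/status",
        "TemporaryFilesDirectory": "/amzn-s3-demo-bucket/as2/tmp",
    },
)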
Determines whether or not unsigned messages from your trading partners will be accepted.
ENABLED: Transfer Family rejects unsigned messages from your trading partner.
DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files.
Failed files
MDN files
Payload files
Status files
Temporary files
Specifies a location to store failed AS2 message files.
" + }, + "MdnFilesDirectory":{ + "shape":"HomeDirectory", + "documentation":"Specifies a location to store MDN files.
" + }, + "PayloadFilesDirectory":{ + "shape":"HomeDirectory", + "documentation":"Specifies a location to store the payload for AS2 message files.
" + }, + "StatusFilesDirectory":{ + "shape":"HomeDirectory", + "documentation":"Specifies a location to store AS2 status messages.
" + }, + "TemporaryFilesDirectory":{ + "shape":"HomeDirectory", + "documentation":"Specifies a location to store temporary AS2 message files.
" + } + }, + "documentation":"Contains Amazon S3 locations for storing specific types of AS2 message files.
" + }, "CustomStepDetails":{ "type":"structure", "members":{ @@ -2478,6 +2514,10 @@ "EnforceMessageSigning":{ "shape":"EnforceMessageSigningType", "documentation":"Determines whether or not unsigned messages from your trading partners will be accepted.
ENABLED: Transfer Family rejects unsigned messages from your trading partner.
DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files.
Failed files
MDN files
Payload files
Status files
Temporary files
Describes the properties of an agreement.
" @@ -5406,6 +5446,10 @@ "EnforceMessageSigning":{ "shape":"EnforceMessageSigningType", "documentation":"Determines whether or not unsigned messages from your trading partners will be accepted.
ENABLED: Transfer Family rejects unsigned messages from your trading partner.
DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files.
Failed files
MDN files
Payload files
Status files
Temporary files
A list of attributes that are needed to successfully evaluate an authorization request. Each attribute in this array must include a map of a data type and its value.
Example: \"contextMap\":{\"<KeyName1>\":{\"boolean\":true},\"<KeyName2>\":{\"long\":1234}}
A Cedar JSON string representation of the context needed to successfully evaluate an authorization request.
Example: {\"cedarJson\":\"{\\\"<KeyName1>\\\": true, \\\"<KeyName2>\\\": 1234}\" }
Contains additional details about the context of the request. Verified Permissions evaluates this information in an authorization request as part of the when and unless clauses in a policy.
This data type is used as a request parameter for the IsAuthorized, BatchIsAuthorized, and IsAuthorizedWithToken operations.
Example: \"context\":{\"contextMap\":{\"<KeyName1>\":{\"boolean\":true},\"<KeyName2>\":{\"long\":1234}}}
Contains additional details about the context of the request. Verified Permissions evaluates this information in an authorization request as part of the when and unless clauses in a policy.
This data type is used as a request parameter for the IsAuthorized, BatchIsAuthorized, and IsAuthorizedWithToken operations.
If you're passing context as part of the request, exactly one instance of context must be passed. If you don't want to pass context, omit the context parameter from your request rather than sending context {}.
Example: \"context\":{\"contextMap\":{\"<KeyName1>\":{\"boolean\":true},\"<KeyName2>\":{\"long\":1234}}}
An array of entities that are needed to successfully evaluate an authorization request. Each entity in this array must include an identifier for the entity, the attributes of the entity, and a list of any parent entities.
" + "documentation":"An array of entities that are needed to successfully evaluate an authorization request. Each entity in this array must include an identifier for the entity, the attributes of the entity, and a list of any parent entities.
If you include multiple entities with the same identifier, only the last one is processed in the request.
A Cedar JSON string representation of the entities needed to successfully evaluate an authorization request.
Example: {\"cedarJson\": \"[{\\\"uid\\\":{\\\"type\\\":\\\"Photo\\\",\\\"id\\\":\\\"VacationPhoto94.jpg\\\"},\\\"attrs\\\":{\\\"accessLevel\\\":\\\"public\\\"},\\\"parents\\\":[]}]\"}
Contains the list of entities to be considered during an authorization request. This includes all principals, resources, and actions required to successfully evaluate the request.
This data type is used as a field in the response parameter for the IsAuthorized and IsAuthorizedWithToken operations.
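For illustration, an IsAuthorized request that supplies context via contextMap and entities via entityList, as described above; the policy store ID, entity types, and attribute values are assumptions, not values from this changeset.

import boto3

avp = boto3.client("verifiedpermissions")

response = avp.is_authorized(
    policyStoreId="PSEXAMPLEabcdefg111111",  # placeholder
    principal={"entityType": "PhotoApp::User", "entityId": "alice"},
    action={"actionType": "PhotoApp::Action", "actionId": "ViewPhoto"},
    resource={"entityType": "PhotoApp::Photo", "entityId": "VacationPhoto94.jpg"},
    # Exactly one context instance; omit the parameter entirely if no context is needed.
    context={"contextMap": {"authenticated": {"boolean": True}}},
    entities={
        "entityList": [
            {
                "identifier": {
                    "entityType": "PhotoApp::Photo",
                    "entityId": "VacationPhoto94.jpg",
                },
                "attributes": {"accessLevel": {"string": "public"}},
                "parents": [],
            }
        ]
    },
)
print(response["decision"])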
", diff --git a/botocore/data/workspaces-thin-client/2023-08-22/service-2.json b/botocore/data/workspaces-thin-client/2023-08-22/service-2.json index 0442a76e60..c35977dfe2 100644 --- a/botocore/data/workspaces-thin-client/2023-08-22/service-2.json +++ b/botocore/data/workspaces-thin-client/2023-08-22/service-2.json @@ -394,7 +394,7 @@ }, "desktopArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.
" + "documentation":"The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0.
" }, "desktopEndpoint":{ "shape":"DesktopEndpoint", @@ -770,7 +770,7 @@ }, "desktopArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.
" + "documentation":"The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0.
" }, "desktopEndpoint":{ "shape":"DesktopEndpoint", @@ -877,7 +877,7 @@ }, "desktopArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.
" + "documentation":"The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0.
" }, "desktopEndpoint":{ "shape":"DesktopEndpoint", @@ -1136,6 +1136,7 @@ }, "MaintenanceWindow":{ "type":"structure", + "required":["type"], "members":{ "type":{ "shape":"MaintenanceWindowType", @@ -1521,7 +1522,7 @@ }, "desktopArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.
" + "documentation":"The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0.
" }, "desktopEndpoint":{ "shape":"DesktopEndpoint", diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index ebc4fada5d..c583e05a31 100644 --- a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -2096,6 +2096,8 @@ "POWER", "GRAPHICS", "POWERPRO", + "GENERALPURPOSE_4XLARGE", + "GENERALPURPOSE_8XLARGE", "GRAPHICSPRO", "GRAPHICS_G4DN", "GRAPHICSPRO_G4DN" diff --git a/botocore/exceptions.py b/botocore/exceptions.py index 9fa0dfaa84..5f2a5a1f76 100644 --- a/botocore/exceptions.py +++ b/botocore/exceptions.py @@ -814,3 +814,12 @@ class EndpointResolutionError(EndpointProviderError): class UnknownEndpointResolutionBuiltInName(EndpointProviderError): fmt = 'Unknown builtin variable name: {name}' + + +class InvalidChecksumConfigError(BotoCoreError): + """Error when an invalid checksum config value is supplied.""" + + fmt = ( + 'Unsupported configuration value for {config_key}. ' + 'Expected one of {valid_options} but got {config_value}.' + ) diff --git a/botocore/handlers.py b/botocore/handlers.py index d0a89e97dd..29a8680a08 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -62,8 +62,6 @@ from botocore.utils import ( SAFE_CHARS, ArnParser, - conditionally_calculate_checksum, - conditionally_calculate_md5, percent_encode, switch_host_with_param, ) @@ -1294,6 +1292,33 @@ def add_query_compatibility_header(model, params, **kwargs): params['headers']['x-amzn-query-mode'] = 'true' +def _handle_request_validation_mode_member(params, model, **kwargs): + client_config = kwargs.get("context", {}).get("client_config") + if client_config is None: + return + response_checksum_validation = client_config.response_checksum_validation + http_checksum = model.http_checksum + mode_member = http_checksum.get("requestValidationModeMember") + if ( + mode_member is not None + and response_checksum_validation == "when_supported" + ): + params.setdefault(mode_member, "ENABLED") + + +def _set_extra_headers_for_unsigned_request( + request, signature_version, **kwargs +): + # When sending a checksum in the trailer of an unsigned chunked request, S3 + # requires us to set the "X-Amz-Content-SHA256" header to "STREAMING-UNSIGNED-PAYLOAD-TRAILER". + checksum_context = request.context.get("checksum", {}) + algorithm = checksum_context.get("request_algorithm", {}) + in_trailer = algorithm.get("in") == "trailer" + headers = request.headers + if signature_version == botocore.UNSIGNED and in_trailer: + headers["X-Amz-Content-SHA256"] = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" + + # This is a list of (event_name, handler). # When a Session is created, everything in this list will be # automatically registered with that Session. 
@@ -1326,6 +1351,7 @@ def add_query_compatibility_header(model, params, **kwargs): ('before-parse.s3.*', handle_expires_header), ('before-parse.s3.*', _handle_200_error, REGISTER_FIRST), ('before-parameter-build', generate_idempotent_uuid), + ('before-parameter-build', _handle_request_validation_mode_member), ('before-parameter-build.s3', validate_bucket_name), ('before-parameter-build.s3', remove_bucket_from_url_paths_from_model), ( @@ -1359,10 +1385,7 @@ def add_query_compatibility_header(model, params, **kwargs): ('before-call.s3', add_expect_header), ('before-call.glacier', add_glacier_version), ('before-call.apigateway', add_accept_header), - ('before-call.s3.PutObject', conditionally_calculate_checksum), - ('before-call.s3.UploadPart', conditionally_calculate_md5), ('before-call.s3.DeleteObjects', escape_xml_payload), - ('before-call.s3.DeleteObjects', conditionally_calculate_checksum), ('before-call.s3.PutBucketLifecycleConfiguration', escape_xml_payload), ('before-call.glacier.UploadArchive', add_glacier_checksums), ('before-call.glacier.UploadMultipartPart', add_glacier_checksums), @@ -1399,6 +1422,7 @@ def add_query_compatibility_header(model, params, **kwargs): ('before-parameter-build.route53', fix_route53_ids), ('before-parameter-build.glacier', inject_account_id), ('before-sign.s3', remove_arn_from_signing_path), + ('before-sign.s3', _set_extra_headers_for_unsigned_request), ( 'before-sign.polly.SynthesizeSpeech', remove_content_type_header_for_presigning, diff --git a/botocore/httpchecksum.py b/botocore/httpchecksum.py index a97eb430d4..5d0a6c17eb 100644 --- a/botocore/httpchecksum.py +++ b/botocore/httpchecksum.py @@ -25,17 +25,15 @@ from binascii import crc32 from hashlib import sha1, sha256 -from botocore.compat import HAS_CRT +from botocore.compat import HAS_CRT, urlparse from botocore.exceptions import ( AwsChunkedWrapperError, FlexibleChecksumError, MissingDependencyException, ) +from botocore.model import StructureShape from botocore.response import StreamingBody -from botocore.utils import ( - conditionally_calculate_md5, - determine_content_length, -) +from botocore.utils import determine_content_length, has_checksum_header if HAS_CRT: from awscrt import checksums as crt_checksums @@ -44,6 +42,8 @@ logger = logging.getLogger(__name__) +DEFAULT_CHECKSUM_ALGORITHM = "CRC32" + class BaseChecksum: _CHUNK_SIZE = 1024 * 1024 @@ -109,6 +109,19 @@ def digest(self): return self._int_crc32c.to_bytes(4, byteorder="big") +class CrtCrc64NvmeChecksum(BaseChecksum): + # Note: This class is only used if the CRT is available + def __init__(self): + self._int_crc64nvme = 0 + + def update(self, chunk): + new_checksum = crt_checksums.crc64nvme(chunk, self._int_crc64nvme) + self._int_crc64nvme = new_checksum & 0xFFFFFFFFFFFFFFFF + + def digest(self): + return self._int_crc64nvme.to_bytes(8, byteorder="big") + + class Sha1Checksum(BaseChecksum): def __init__(self): self._checksum = sha1() @@ -246,7 +259,19 @@ def resolve_request_checksum_algorithm( params, supported_algorithms=None, ): + # If the header is already set by the customer, skip calculation + if has_checksum_header(request): + return + + checksum_context = request["context"].get("checksum", {}) + request_checksum_calculation = request["context"][ + "client_config" + ].request_checksum_calculation http_checksum = operation_model.http_checksum + request_checksum_required = ( + operation_model.http_checksum_required + or http_checksum.get("requestChecksumRequired") + ) algorithm_member = 
http_checksum.get("requestAlgorithmMember") if algorithm_member and algorithm_member in params: # If the client has opted into using flexible checksums and the @@ -267,35 +292,59 @@ def resolve_request_checksum_algorithm( raise FlexibleChecksumError( error_msg=f"Unsupported checksum algorithm: {algorithm_name}" ) + elif request_checksum_required or ( + algorithm_member and request_checksum_calculation == "when_supported" + ): + # Don't use a default checksum for presigned requests. + if request["context"].get("is_presign_request"): + return + algorithm_name = DEFAULT_CHECKSUM_ALGORITHM.lower() + algorithm_member_header = _get_request_algorithm_member_header( + operation_model, request, algorithm_member + ) + if algorithm_member_header is not None: + checksum_context["request_algorithm_header"] = { + "name": algorithm_member_header, + "value": DEFAULT_CHECKSUM_ALGORITHM, + } + else: + return - location_type = "header" - if operation_model.has_streaming_input: + location_type = "header" + if ( + operation_model.has_streaming_input + and urlparse(request["url"]).scheme == "https" + ): + if request["context"]["client_config"].signature_version != 's3': # Operations with streaming input must support trailers. - if request["url"].startswith("https:"): - # We only support unsigned trailer checksums currently. As this - # disables payload signing we'll only use trailers over TLS. - location_type = "trailer" - - algorithm = { - "algorithm": algorithm_name, - "in": location_type, - "name": f"x-amz-checksum-{algorithm_name}", - } + # We only support unsigned trailer checksums currently. As this + # disables payload signing we'll only use trailers over TLS. + location_type = "trailer" + + algorithm = { + "algorithm": algorithm_name, + "in": location_type, + "name": f"x-amz-checksum-{algorithm_name}", + } - if algorithm["name"] in request["headers"]: - # If the header is already set by the customer, skip calculation - return + checksum_context["request_algorithm"] = algorithm + request["context"]["checksum"] = checksum_context - checksum_context = request["context"].get("checksum", {}) - checksum_context["request_algorithm"] = algorithm - request["context"]["checksum"] = checksum_context - elif operation_model.http_checksum_required or http_checksum.get( - "requestChecksumRequired" - ): - # Otherwise apply the old http checksum behavior via Content-MD5 - checksum_context = request["context"].get("checksum", {}) - checksum_context["request_algorithm"] = "conditional-md5" - request["context"]["checksum"] = checksum_context + +def _get_request_algorithm_member_header( + operation_model, request, algorithm_member +): + """Get the name of the header targeted by the "requestAlgorithmMember".""" + operation_input_shape = operation_model.input_shape + if not isinstance(operation_input_shape, StructureShape): + return + + algorithm_member_shape = operation_input_shape.members.get( + algorithm_member + ) + + if algorithm_member_shape: + return algorithm_member_shape.serialization.get("name") def apply_request_checksum(request): @@ -305,10 +354,7 @@ def apply_request_checksum(request): if not algorithm: return - if algorithm == "conditional-md5": - # Special case to handle the http checksum required trait - conditionally_calculate_md5(request) - elif algorithm["in"] == "header": + if algorithm["in"] == "header": _apply_request_header_checksum(request) elif algorithm["in"] == "trailer": _apply_request_trailer_checksum(request) @@ -316,6 +362,11 @@ def apply_request_checksum(request): raise FlexibleChecksumError( 
error_msg="Unknown checksum variant: {}".format(algorithm["in"]) ) + if "request_algorithm_header" in checksum_context: + request_algorithm_header = checksum_context["request_algorithm_header"] + request["headers"][request_algorithm_header["name"]] = ( + request_algorithm_header["value"] + ) def _apply_request_header_checksum(request): @@ -358,6 +409,12 @@ def _apply_request_trailer_checksum(request): # services such as S3 may require the decoded content length headers["X-Amz-Decoded-Content-Length"] = str(content_length) + if "Content-Length" in headers: + del headers["Content-Length"] + logger.debug( + "Removing the Content-Length header since 'chunked' is specified for Transfer-Encoding." + ) + if isinstance(body, (bytes, bytearray)): body = io.BytesIO(body) @@ -465,12 +522,13 @@ def _handle_bytes_response(http_response, response, algorithm): "sha1": Sha1Checksum, "sha256": Sha256Checksum, } -_CRT_CHECKSUM_ALGORITHMS = ["crc32", "crc32c"] +_CRT_CHECKSUM_ALGORITHMS = ["crc32", "crc32c", "crc64nvme"] if HAS_CRT: # Use CRT checksum implementations if available _CRT_CHECKSUM_CLS = { "crc32": CrtCrc32Checksum, "crc32c": CrtCrc32cChecksum, + "crc64nvme": CrtCrc64NvmeChecksum, } _CHECKSUM_CLS.update(_CRT_CHECKSUM_CLS) # Validate this list isn't out of sync with _CRT_CHECKSUM_CLS keys @@ -478,4 +536,4 @@ def _handle_bytes_response(http_response, response, algorithm): name in _CRT_CHECKSUM_ALGORITHMS for name in _CRT_CHECKSUM_CLS.keys() ) _SUPPORTED_CHECKSUM_ALGORITHMS = list(_CHECKSUM_CLS.keys()) -_ALGORITHMS_PRIORITY_LIST = ['crc32c', 'crc32', 'sha1', 'sha256'] +_ALGORITHMS_PRIORITY_LIST = ['crc64nvme', 'crc32c', 'crc32', 'sha1', 'sha256'] diff --git a/botocore/utils.py b/botocore/utils.py index 86ebde80fe..30a513ea90 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -3228,6 +3228,7 @@ def get_encoding_from_headers(headers, default='ISO-8859-1'): def calculate_md5(body, **kwargs): + """This function has been deprecated, but is kept for backwards compatibility.""" if isinstance(body, (bytes, bytearray)): binary_md5 = _calculate_md5_from_bytes(body) else: @@ -3236,11 +3237,13 @@ def calculate_md5(body, **kwargs): def _calculate_md5_from_bytes(body_bytes): + """This function has been deprecated, but is kept for backwards compatibility.""" md5 = get_md5(body_bytes) return md5.digest() def _calculate_md5_from_file(fileobj): + """This function has been deprecated, but is kept for backwards compatibility.""" start_position = fileobj.tell() md5 = get_md5() for chunk in iter(lambda: fileobj.read(1024 * 1024), b''): @@ -3256,15 +3259,17 @@ def _is_s3express_request(params): return endpoint_properties.get('backend') == 'S3Express' -def _has_checksum_header(params): +def has_checksum_header(params): + """ + Checks if a header starting with "x-amz-checksum-" is provided in a request. + + This function is considered private and subject to abrupt breaking changes or + removal without prior announcement. Please do not use it directly. + """ headers = params['headers'] - # If a user provided Content-MD5 is present, - # don't try to compute a new one. - if 'Content-MD5' in headers: - return True # If a header matching the x-amz-checksum-* pattern is present, we - # assume a checksum has already been provided and an md5 is not needed + # assume a checksum has already been provided by the user. 
for header in headers: if CHECKSUM_HEADER_PATTERN.match(header): return True @@ -3273,12 +3278,14 @@ def _has_checksum_header(params): def conditionally_calculate_checksum(params, **kwargs): - if not _has_checksum_header(params): + """This function has been deprecated, but is kept for backwards compatibility.""" + if not has_checksum_header(params): conditionally_calculate_md5(params, **kwargs) conditionally_enable_crc32(params, **kwargs) def conditionally_enable_crc32(params, **kwargs): + """This function has been deprecated, but is kept for backwards compatibility.""" checksum_context = params.get('context', {}).get('checksum', {}) checksum_algorithm = checksum_context.get('request_algorithm') if ( @@ -3296,7 +3303,10 @@ def conditionally_enable_crc32(params, **kwargs): def conditionally_calculate_md5(params, **kwargs): - """Only add a Content-MD5 if the system supports it.""" + """Only add a Content-MD5 if the system supports it. + + This function has been deprecated, but is kept for backwards compatibility. + """ body = params['body'] checksum_context = params.get('context', {}).get('checksum', {}) checksum_algorithm = checksum_context.get('request_algorithm') @@ -3304,7 +3314,7 @@ def conditionally_calculate_md5(params, **kwargs): # Skip for requests that will have a flexible checksum applied return - if _has_checksum_header(params): + if has_checksum_header(params): # Don't add a new header if one is already available. return diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css index 5be72283d4..537e5e243e 100644 --- a/docs/source/_static/css/custom.css +++ b/docs/source/_static/css/custom.css @@ -1,34 +1,37 @@ /* Prevents two-dimensional scrolling and content loss. */ -h1, code, li { +h1, +code, +li { overflow-wrap: break-word; } /* Provides padding to push down the "breadcrumb" navigation in nested pages. */ -.content{ +.content { padding: 1em 3em; } /* Improves spacing around custom sidebar section*/ -.sidebar-div{ +.sidebar-div { margin: var(--sidebar-caption-space-above) 0 0 0; - padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal); + padding: var(--sidebar-item-spacing-vertical) + var(--sidebar-item-spacing-horizontal); } /* Custom sidebar heading text. Example: Feedback Section heading. */ -.sidebar-heading{ +.sidebar-heading { color: var(--color-sidebar-caption-text); font-size: var(--font-size--normal); font-weight: 700; } /* Improves text used in custom sidebar section. Example: Feedback section content.*/ -.sidebar-text{ +.sidebar-text { color: var(--color-sidebar-caption-text); font-size: var(--sidebar-item-font-size); line-height: 1.4; } /* Removes empty space above the sidebar-tree (under "Feedback" section) */ -.sidebar-tree{ +.sidebar-tree { margin-top: 0px; } /* Adds padding around AWS Logo in the left sidebar. */ -.sidebar-logo{ +.sidebar-logo { padding: 20% 15%; } /* Hides a div by default. */ @@ -52,7 +55,7 @@ h1, code, li { visibility: hidden; } /* Hides the icon by default and applies relevant styling. */ -.nav-close-icon{ +.nav-close-icon { color: var(--color-foreground-secondary); display: flex; align-items: center; @@ -73,24 +76,24 @@ h1, code, li { } } @media (max-width: 82em) { - /* Displays a div on a medium screen. */ + /* Displays a div on a medium screen. */ .show-div-md { display: flex; } } /* Apply furo styled admonition titles for